repo_name | path | copies | size | content | license
---|---|---|---|---|---|
nrhine1/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
rodorad/spark-tk | regression-tests/sparktkregtests/testcases/dicom/save_load_dicom_test.py | 13 | 4193 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.save and dicom.load functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import dicom
import numpy
import datetime
class SaveLoadDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(SaveLoadDicomTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
# generate a unique name to save the dicom object under
self.location = self.get_file(self.get_name("save_load_test"))
def test_basic_save_load_content_test(self):
"""basic save load content test"""
# save the current dicom object under a unique name
self.dicom.save(self.location)
# load the dicom
load_dicom = self.context.load(self.location)
original_metadata = self.dicom.metadata.to_pandas()["metadata"]
load_metadata = load_dicom.metadata.to_pandas()["metadata"]
# compare the loaded dicom object with the dicom object we created
for (original_row, load_row) in zip(original_metadata, load_metadata):
original_file = original_row.encode("ascii", "ignore")
# extract and remove bulk data element from metadata since we don't care about it
# bulk data records the file's location, so it may differ
loaded_file = load_row.encode("ascii", "ignore")
bulk_data_index = original_file.index("<BulkData")
load_bulk_data = loaded_file[bulk_data_index:bulk_data_index + loaded_file[bulk_data_index:].index(">") + 1]
original_bulk_data = original_file[bulk_data_index:bulk_data_index + original_file[bulk_data_index:].index(">") + 1]
loaded_file = loaded_file.replace(load_bulk_data, "")
original_file = original_file.replace(original_bulk_data, "")
self.assertEqual(loaded_file, original_file)
# now we check that the pixel data matches
original_image = self.dicom.pixeldata.to_pandas()
loaded_image = load_dicom.pixeldata.to_pandas()
for (dcm_image, pixel_image) in zip(original_image["imagematrix"], loaded_image["imagematrix"]):
numpy.testing.assert_equal(pixel_image, dcm_image)
def test_save_invalid_long_unicode_name(self):
"""save under a long unicode name, should fail"""
# we will pass the dicom metadata itself as the name
metadata_unicode = self.dicom.metadata.to_pandas()["metadata"]
with self.assertRaisesRegexp(Exception, "does not exist"):
self.dicom.save(metadata_unicode)
def test_load_does_not_exist(self):
"""test load dicom does not exist"""
with self.assertRaisesRegexp(Exception, "Input path does not exist"):
self.context.load("does_not_exist")
def test_save_invalid_path_type(self):
"""test save dicom invalid path type"""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.dicom.save(1)
def test_save_name_already_exists(self):
"""test save dicom duplicate name"""
with self.assertRaisesRegexp(Exception, "already exists"):
self.dicom.save("duplicate_name")
self.dicom.save("duplicate_name")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
macks22/gensim | gensim/sklearn_api/hdp.py | 1 | 4164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit-learn interface for gensim, for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class HdpTransformer(TransformerMixin, BaseEstimator):
"""
Base HDP module
"""
def __init__(self, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150,
alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
"""
Sklearn api for HDP model. See gensim.models.HdpModel for parameter details.
"""
self.gensim_model = None
self.id2word = id2word
self.max_chunks = max_chunks
self.max_time = max_time
self.chunksize = chunksize
self.kappa = kappa
self.tau = tau
self.K = K
self.T = T
self.alpha = alpha
self.gamma = gamma
self.eta = eta
self.scale = scale
self.var_converge = var_converge
self.outputdir = outputdir
self.random_state = random_state
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.HdpModel
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(X)
else:
corpus = X
self.gensim_model = models.HdpModel(
corpus=corpus, id2word=self.id2word, max_chunks=self.max_chunks,
max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
)
return self
def transform(self, docs):
"""
Takes a list of documents as input ('docs').
Returns a matrix of topic distribution for the given document bow, where a_ij
indicates (topic_i, topic_probability_j).
The input `docs` should be in BOW format and can be a list of documents like : [ [(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)] ]
or a single document like : [(4, 1), (7, 1)]
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# The input as array of array
check = lambda x: [x] if isinstance(x[0], tuple) else x
docs = check(docs)
X = [[] for _ in range(0, len(docs))]
max_num_topics = 0
for k, v in enumerate(docs):
X[k] = self.gensim_model[v]
max_num_topics = max(max_num_topics, max(x[0] for x in X[k]) + 1)
for k, v in enumerate(X):
# returning dense representation for compatibility with sklearn but we should go back to sparse representation in the future
dense_vec = matutils.sparse2full(v, max_num_topics)
X[k] = dense_vec
return np.reshape(np.array(X), (len(docs), max_num_topics))
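# Hedged usage sketch (the toy corpus below is illustrative only, not from the original source):
#   from gensim.corpora import Dictionary
#   texts = [['human', 'interface'], ['graph', 'trees'], ['graph', 'minors']]
#   dictionary = Dictionary(texts)
#   bow_corpus = [dictionary.doc2bow(text) for text in texts]
#   hdp = HdpTransformer(id2word=dictionary).fit(bow_corpus)
#   topic_matrix = hdp.transform(bow_corpus)  # shape: (n_docs, n_topics_observed)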
def partial_fit(self, X):
"""
Train model over X.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(X)
if self.gensim_model is None:
self.gensim_model = models.HdpModel(
id2word=self.id2word, max_chunks=self.max_chunks,
max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
)
self.gensim_model.update(corpus=X)
return self
| lgpl-2.1 |
SummaLabs/DLS | app/backend/core/models/keras_trainer_v4.py | 1 | 30962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__='ar'
import re
import sys
import os
import glob
import time
import json
import numpy as np
import skimage.io as io
import skimage.color as skcolor
import skimage.transform as sktransform
import matplotlib.pyplot as plt
from keras import backend as K
import keras
from keras.models import Sequential
from keras.layers import Convolution1D, Convolution2D, Convolution3D,\
MaxPooling1D, MaxPooling2D, MaxPooling3D,\
AveragePooling1D,AveragePooling2D, AveragePooling3D,\
InputLayer, Flatten, Merge, Activation, Dense, Dropout
# from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD
from keras.models import model_from_json
from keras.optimizers import Optimizer
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax
from app.backend.core import utils as dlsutils
from batcher_image2d import BatcherImage2DLMDB
# from flow_parser import getKerasOptimizerName
from flow_parser_helper_opt import getOptimizerJson2Keras, getKerasOptimizerName
from cfg import CFG_MODEL_TRAIN, CFG_SOLVER
#########################
def split_list_by_blocks(lst, psiz):
"""
Split a list into chunks of fixed size `psiz` (the last chunk may be shorter).
:param lst: input list
:param psiz: chunk size
:return: list of chunks
"""
tret = [lst[x:x + psiz] for x in xrange(0, len(lst), psiz)]
return tret
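# Illustrative example (not part of the original file):
#   split_list_by_blocks([0, 1, 2, 3, 4, 5, 6], 3) -> [[0, 1, 2], [3, 4, 5], [6]]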
def findLayerFromEndByType(model, layerType):
for ii,ll in enumerate(model.layers[::-1]):
if isinstance(ll, layerType):
return (len(model.layers) - ii - 1)
return -1
def cloneLayerFromLayer(pLayer):
if isinstance(pLayer, Convolution1D):
return Convolution1D.from_config(pLayer.get_config())
elif isinstance(pLayer, Convolution2D):
return Convolution2D.from_config(pLayer.get_config())
elif isinstance(pLayer, Convolution3D):
return Convolution3D.from_config(pLayer.get_config())
# Max-Pooling:
elif isinstance(pLayer, MaxPooling1D):
return MaxPooling1D.from_config(pLayer.get_config())
elif isinstance(pLayer, MaxPooling2D):
return MaxPooling2D.from_config(pLayer.get_config())
elif isinstance(pLayer, MaxPooling3D):
return MaxPooling3D.from_config(pLayer.get_config())
# Average-Pooling
elif isinstance(pLayer, AveragePooling1D):
return AveragePooling1D.from_config(pLayer.get_config())
elif isinstance(pLayer, AveragePooling2D):
return AveragePooling2D.from_config(pLayer.get_config())
elif isinstance(pLayer, AveragePooling3D):
return AveragePooling3D.from_config(pLayer.get_config())
#
elif isinstance(pLayer, Flatten):
return Flatten.from_config(pLayer.get_config())
elif isinstance(pLayer, Merge):
return Merge.from_config(pLayer.get_config())
elif isinstance(pLayer, Activation):
return Activation.from_config(pLayer.get_config())
elif isinstance(pLayer, Dropout):
return Dropout.from_config(pLayer.get_config())
#
elif isinstance(pLayer, Dense):
return Dense.from_config(pLayer.get_config())
return None
#########################
class KerasTrainer:
extModelWeights = 'h5kerasmodel'
extJsonTrainConfig = '_trainconfig.json'
extJsonSolverState = '_solverstate.json'
modelPrefix=''
batcherLMDB = None
pathModelConfig=None
model=None
outputDir=None
sizeBatch=32
numEpoch=1
numIterPerEpoch=0
intervalSaveModel=1
intervalValidation=1
currentIter=0
currentEpoch=0
printInterval=20
modelName="Unknown"
deviceType='cpu'
def __init__(self):
self.cleanResults()
@staticmethod
def adjustModelInputOutput2DBData(parModel, parLMDB, isFixOutputLayer = True):
# (1) check LMDB is object instance or path to DB
if isinstance(parLMDB, BatcherImage2DLMDB):
ptrLMDB = parLMDB
elif (isinstance(parLMDB, str) or isinstance(parLMDB, unicode)):
ptrLMDB = BatcherImage2DLMDB(parLMDB, 1)
else:
raise Exception("Unknown parLMDB instance")
# (2) Build Sequential model (currently only Sequential models supported)
retModel = Sequential()
tmpL0 = parModel.layers[0]
# (3) if InputLayer is present - skip it
if isinstance(tmpL0, InputLayer):
idxStart=1
else:
idxStart=0
# (4) Recreate new InputShape layer with DB input shape
retModel.add(InputLayer(input_shape=ptrLMDB.shapeImg))
#FIXME: check this code - is implicit layer resizing really a good idea?
# (5) find the output Dense layer to automatically adjust its output size to the DB output
idxDense = -1
if isFixOutputLayer:
idxDense = findLayerFromEndByType(parModel, keras.layers.Dense)
if idxDense<0:
raise Exception('Model without Dense layer currently not supported!')
listLayers = parModel.layers[idxStart:idxDense]
else:
listLayers = parModel.layers[idxStart:]
# (6) Re-create model layers
for ll in listLayers:
ll.inbound_nodes = []
# print ('\tadd [%s]' % (ll.__str__()))
tmpLayer = cloneLayerFromLayer(ll)
retModel.add(tmpLayer)
# (7) fix output dimension
if isFixOutputLayer and idxDense>0:
#FIXME: hack for the classification model task
tmpLayer = parModel.layers[idxDense]
tmpLayer.inbound_nodes = []
tmpLayerConfig = tmpLayer.get_config()
#FIXME: check the Keras 'output_dim' parameter
tmpLayerConfig['output_dim'] = ptrLMDB.numLbl
retModel.add(Dense.from_config(tmpLayerConfig))
for ll in parModel.layers[idxDense+1:]:
ll.inbound_nodes = []
tmpLayer = cloneLayerFromLayer(ll)
retModel.add(tmpLayer)
#
# FIXME: check this point (automatic output layer size). Move SoftMax to config in the future
# if isFixOutputLayer:
# retModel.add(Dense(ptrLMDB.numLbl, activation='softmax'))
return retModel
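# Hedged usage sketch (the path and variable names below are hypothetical, not from the
# original source): adapt an arbitrary Sequential config so its input matches the LMDB
# image shape and its last Dense layer matches the number of labels in the database.
#   db = BatcherImage2DLMDB('/path/to/lmdb_job', 1)
#   adjusted = KerasTrainer.adjustModelInputOutput2DBData(base_model, db)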
def buildModel(self, pathLMDBJob, pathModelConfig,
sizeBatch, numEpoch, intervalSaveModel=1, intervalValidation=1,
outputDir=None, modelPrefixName='keras_model', isResizeInputLayerToImageShape=True):
if self.isOk():
self.cleanModel()
self.loadBatcherLMDB(pathLMDBJob, sizeBatch)
with open(pathModelConfig, 'r') as f:
modelJSON = f.read()
modelFromCfg = model_from_json(modelJSON)
if modelFromCfg is not None:
self.pathModelConfig = pathModelConfig
self.sizeBatch = sizeBatch
self.numEpoch = numEpoch
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.intervalSaveModel = intervalSaveModel
self.intervalValidation = intervalValidation
self.modelPrefix = modelPrefixName
self.cleanResults()
if outputDir is None:
self.outputDir = os.getcwd()
else:
if os.path.isdir(outputDir):
self.outputDir = outputDir
else:
strErr = "Directory not found [%s]" % outputDir
self.printError(strErr)
raise Exception(strErr)
# FIXME: check this point: need more accurate logic to sync Data-Shape and Model-Input-Shape
# if isResizeInputLayerToImageShape:
# tmpL0 = modelFromCfg.layers[0]
# tmpL0cfg = tmpL0.get_config()
# if re.match(r'dense_input*', tmpL0.input.name) is not None:
# tmpShapeImageSize = np.prod(self.lmdbReader.shapeImg)
# self.model = Sequential()
# self.model.add(
# Dense(tmpL0cfg['output_dim'], input_dim=tmpShapeImageSize, init=tmpL0cfg['init']))
# for ll in modelFromCfg.layers[1:]:
# self.model.add(ll)
# else:
# self.model = modelFromCfg
# else:
# self.model = modelFromCfg
# FIXME: check this point (automatic output layer size). Move SoftMax to config in the future
# self.model.add(Dense(self.lmdbReader.numLbl))
# self.model.add(Activation('softmax'))
self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB)
# TODO: make the setting for code below. For optimizer, loss-function, metrics
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
def buildModelFromConfigs(self, paramBatcherLMDB, modelConfig,
sizeBatch, numEpoch,
modelOptimizer=None,
intervalSaveModel=1, intervalValidation=1,
outputDir=None, modelPrefixName='keras_model',
isAppendOutputLayer = True):
self.batcherLMDB = paramBatcherLMDB
modelFromCfg = modelConfig
if modelFromCfg is not None:
self.pathModelConfig = None
self.sizeBatch = sizeBatch
self.numEpoch = numEpoch
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.intervalSaveModel = intervalSaveModel
self.intervalValidation = intervalValidation
self.modelPrefix = modelPrefixName
self.cleanResults()
if outputDir is None:
self.outputDir = os.getcwd()
else:
if os.path.isdir(outputDir):
self.outputDir = outputDir
else:
strErr = "Directory not found [%s]" % outputDir
self.printError(strErr)
raise Exception(strErr)
self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB, isFixOutputLayer=isAppendOutputLayer)
# TODO: make the setting for code below. For optimizer, loss-function, metrics
if modelOptimizer is None:
opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
else:
opt = modelOptimizer
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
def isOk(self):
return ((self.batcherLMDB is not None) and (self.model is not None))
def loadBatcherLMDB(self, dbJobID, sizeBatch):
dirDataset=dlsutils.getPathForDatasetDir()
pathLMDBJob = os.path.join(dirDataset, dbJobID)
self.batcherLMDB = BatcherImage2DLMDB(pathLMDBJob, sizeBatch)
self.sizeBatch = sizeBatch
if not self.batcherLMDB.isOk():
strErr = "[KERAS-TRAINER] Incorrect LMDB-data in [%s]" % dbJobID
self.printError(strErr)
raise Exception(strErr)
def cleanResults(self):
self.trainLog={'epoch':[], 'iter':[], 'lossTrain':[], 'accTrain':[], 'lossVal':[], 'accVal':[]}
self.currentIter=0
self.currentEpoch=0
def cleanModel(self):
if self.isOk():
self.cleanResults()
self.model = None
self.batcherLMDB.close()
self.batcherLMDB = None
self.pathModelConfig = None
def printError(self, strError):
print("keras-error#%s" % strError)
def trainOneIter(self):
modelInputShape = list(self.model.input_shape)
dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
tlossTrain = self.model.train_on_batch(dataX, dataY)
isNeedPrintInfo = False
if (self.currentIter % self.printInterval == 0):
dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
tlossVal = self.model.test_on_batch(dataXval, dataYval)
self.trainLog['epoch'].append(self.currentEpoch)
self.trainLog['iter'].append(self.currentIter)
self.trainLog['lossTrain'].append(float(tlossTrain[0]))
self.trainLog['accTrain'].append(float(tlossTrain[1]))
self.trainLog['lossVal'].append(float(tlossVal[0]))
self.trainLog['accVal'].append(float(tlossVal[1]))
print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
self.currentEpoch,
self.currentIter,
self.trainLog['lossTrain'][-1],
self.trainLog['accTrain'][-1],
self.trainLog['lossVal'][-1],
self.trainLog['accVal'][-1]
))
sys.stdout.flush()
isNeedPrintInfo = True
self.currentIter += 1
return isNeedPrintInfo
def trainOneEpoch(self):
if not self.isOk():
strErr='KerasTrainer is not correctly initialized'
self.printError(strErr)
raise Exception(strErr)
modelInputShape = list(self.model.input_shape)
for ii in xrange(self.numIterPerEpoch):
dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
tlossTrain = self.model.train_on_batch(dataX, dataY)
if (self.currentIter%self.printInterval==0):
dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
tlossVal = self.model.test_on_batch(dataXval, dataYval)
self.trainLog['epoch'].append(self.currentEpoch)
self.trainLog['iter'].append(self.currentIter)
self.trainLog['lossTrain'].append(tlossTrain[0])
self.trainLog['accTrain'].append(tlossTrain[1])
self.trainLog['lossVal'].append(tlossVal[0])
self.trainLog['accVal'].append(tlossVal[1])
print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
self.currentEpoch,
self.currentIter,
self.trainLog['lossTrain'][-1],
self.trainLog['accTrain'][-1],
self.trainLog['lossVal'][-1],
self.trainLog['accVal'][-1]
))
sys.stdout.flush()
self.currentIter +=1
self.currentEpoch += 1
def convertImgUint8ToDBImage(self, pimg):
#FIXME: the shape can also be obtained from the Batcher and from model.layers...
if len(self.batcherLMDB.shapeImg) < 3:
numCh = 1
else:
# FIXME: check this point, the number of channels can also be the last element of the array...
numCh = self.batcherLMDB.shapeImg[0]
# check #channels of input image
if len(pimg.shape) < 3:
numChImg = 1
else:
numChImg = 3
# if the number of channels in the input image differs from the training database, convert the image to the database shape
if numCh != numChImg:
if numCh == 1:
# FIXME: this fixes a potential bug: rgb2gray automatically changes the value range from (0, 255) to (0, 1)
pimg = skcolor.rgb2gray(pimg.astype(np.float))
else:
pimg = skcolor.gray2rgb(pimg)
timg = sktransform.resize(pimg.astype(np.float32) * self.batcherLMDB.scaleFactor, self.batcherLMDB.shapeImg[1:])
if numCh==1:
timg = timg.reshape([1] + list(timg.shape))
else:
timg = timg.transpose((2, 0, 1))
if self.batcherLMDB.isRemoveMean:
# FIXME: check this point: the mean-removal type should come from a single config (shared by the train and inference stages)
timg -= self.batcherLMDB.meanChImage
return timg
def inferListImagePath(self, listPathToImages, batchSizeInfer=None):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if batchSizeInfer is None:
batchSizeInfer = self.sizeBatch
splListPathToImages = split_list_by_blocks(listPathToImages, batchSizeInfer)
retProb = None
for idxBatch,lstPath in enumerate(splListPathToImages):
modelInputShape = list(self.model.input_shape)
# Fit batchSize to current number of images in list (lstPath)
tmpBatchSize = len(lstPath)
tdataX=None
for ppi,ppath in enumerate(lstPath):
timg = io.imread(ppath)
if timg is None:
strError = 'Cant read input image [%s], may be image is incorrect' % ppath
self.printError(strError)
raise Exception(strError)
timg = self.convertImgUint8ToDBImage(timg)
# Delayed initialization of Batch of Input-Data
if tdataX is None:
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[ppi] = timg
#FIXME: check this point: this code was tested on fully-connected NNs and still needs tests for convolutional neural networks
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
# tprob = self.model.predict(tdataX, batch_size=tmpBatchSize)
tprob = self.model.predict(tdataX)
# Delayed initialization of returned classification probability
if retProb is None:
retProb = tprob
else:
retProb = np.concatenate((retProb, tprob), axis=0)
idxMax = np.argmax(retProb, axis=1)
retLbl = np.array(self.batcherLMDB.lbl)[idxMax]
retVal = np.max(retProb, axis=1)
ret = {
'prob' : retProb,
'label' : retLbl,
'val' : retVal
}
return ret
def inferOneImageU8_DebugActivations(self, imgu8):
# [BEGIN] this code is cloned from self.inferOneImageU8()
timg = self.convertImgUint8ToDBImage(imgu8)
tmpBatchSize = 1
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
# FIXME: [1] check data type! [float32/float64]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[0] = timg
modelInputShape = list(self.model.input_shape)
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
# [END] this code is cloned from self.inferOneImageU8()
lstLayerForK=[]
for ii in xrange(len(self.model.layers)):
lstLayerForK.append(self.model.layers[ii].output)
localGetActivations = K.function([self.model.layers[0].input], lstLayerForK)
dataActivations = localGetActivations([tdataX])
return dataActivations
def inferOneImageU8(self, imgu8):
timg = self.convertImgUint8ToDBImage(imgu8)
tmpBatchSize = 1
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
# FIXME: [1] check data type! [float32/float64]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[0] = timg
modelInputShape = list(self.model.input_shape)
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
tprob = self.model.predict(tdataX, batch_size=1)
posMax = np.argmax(tprob[0])
tlbl = self.batcherLMDB.lbl[posMax]
tval = tprob[0][posMax]
tret = {
'prob': tprob,
'label': tlbl,
'val': tval
}
return tret
def inferOneImagePath(self, pathToImage):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if not os.path.isfile(pathToImage):
strError='Cant find input image [%s]' % pathToImage
self.printError(strError)
raise Exception(strError)
timgu8 = io.imread(pathToImage)
if timgu8 is None:
strError = 'Cant read input image [%s], may be image is incorrect' % pathToImage
self.printError(strError)
raise Exception(strError)
return self.inferOneImageU8(timgu8)
def inferOneImagePathSorted(self, pathToImage):
tret = self.inferOneImagePath(pathToImage)
tarrProb=tret['prob'][0]
sortedIdx = np.argsort(-tarrProb)
sortedLbl = np.array(self.batcherLMDB.lbl)[sortedIdx]
sortedProb = tarrProb[sortedIdx]
tmp = [(ll,pp) for ll,pp in zip(sortedLbl,sortedProb)]
ret = {
'best': {
'label': tret['label'],
'prob': tret['val']
},
'distrib': tmp
}
return ret
def saveModelState(self, parOutputDir=None, isSaveWeights=True):
if parOutputDir is not None:
if not os.path.isdir(parOutputDir):
strError = "Cant find directory [%s]" % parOutputDir
self.printError(strError)
raise Exception(strError)
self.outputDir = parOutputDir
foutModelCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonTrainConfig))
foutSolverCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonSolverState))
foutModelWeights=os.path.join(self.outputDir,'%s_iter_%06d.%s' % (self.modelPrefix,self.currentIter,self.extModelWeights))
#
#FIXME: this is a temporary solution, fix this in the future!
tmpOptimizerCfg = self.model.optimizer.get_config()
tmpOptimizerCfg['name'] = getKerasOptimizerName(self.model.optimizer)
jsonSolverState={
'optimizer' : tmpOptimizerCfg,
'loss' : self.model.loss,
'metrics' : self.model.metrics_names,
'dataset-id' : self.batcherLMDB.cfg.dbId,
'pathModelConfig' : "%s" % os.path.basename(self.pathModelConfig),
'sizeBatch' : self.sizeBatch,
'numEpoch' : self.numEpoch,
'currentIter' : self.currentIter,
'intervalSaveModel' : self.intervalSaveModel,
'intervalValidation': self.intervalValidation,
'printInterval' : self.printInterval,
'modelPrefix' : "%s" % self.modelPrefix,
'modelName' : self.modelName,
'deviceType' : self.deviceType
}
# FIXME: check the necessity of the item [pathModelConfig]
txtJsonSolverState = json.dumps(jsonSolverState, indent=4)
with open(foutSolverCfg, 'w') as fslv:
fslv.write(txtJsonSolverState)
#
with open(foutModelCfg, 'w') as fcfg:
fcfg.write(self.model.to_json(sort_keys=True, indent=4, separators=(',', ': ')))
if isSaveWeights:
self.model.save_weights(foutModelWeights, overwrite=True)
# Print message when model saved (for Digits)
print(("keras-savestate#%s#%s#%s|%s|%s") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
os.path.abspath(foutModelCfg),
os.path.abspath(foutSolverCfg),
os.path.abspath(foutModelWeights)
))
def getTrainingStatesInDir(self, pathTrainDir, isReturnAllWeightsPath=False):
"""
explore directory with training-output data, and return path to files
:param pathTrainDir: path to directory with training-output
:return: None or list [pathModelConfigJson, pathSolverStateJson, pathModelWeights]
"""
if not os.path.isdir(pathTrainDir):
strError = "Cant find directory [%s]" % pathTrainDir
self.printError(strError)
return None
lstModelConfig = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonTrainConfig))
lstSolverStates = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonSolverState))
lstModelWeights = glob.glob('%s/*_iter_[0-9]*.%s' % (pathTrainDir, self.extModelWeights))
if len(lstModelConfig)<1:
strError = 'Cant find ModelConfig [%s] files in directory [%s]' % (self.extJsonTrainConfig, pathTrainDir)
self.printError(strError)
return None
if len(lstSolverStates)<1:
strError = 'Cant find Solver-States [%s] files in directory [%s]' % (self.extJsonSolverState, pathTrainDir)
self.printError(strError)
return None
if len(lstModelWeights) < 1:
strError = 'Cant find Model-Weights [%s] files in directory [%s]' % (self.extModelWeights, pathTrainDir)
self.printError(strError)
return None
lstModelConfig = sorted(lstModelConfig)
lstSolverStates = sorted(lstSolverStates)
lstModelWeights = sorted(lstModelWeights)
pathModelConfig = lstModelConfig[-1]
pathSolverState = lstSolverStates[-1]
if not isReturnAllWeightsPath:
pathModelWeight = lstModelWeights[-1]
else:
pathModelWeight = lstModelWeights
return [pathModelConfig, pathSolverState, pathModelWeight]
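# Illustrative return value (the directory and file names are hypothetical, but follow
# the extensions and the '%s_iter_%06d.%s' pattern used by saveModelState above):
#   ['/train_dir/keras_model_trainconfig.json',
#    '/train_dir/keras_model_solverstate.json',
#    '/train_dir/keras_model_iter_000100.h5kerasmodel']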
def loadModelFromTrainingStateInDir(self, pathTrainDir, isLoadLMDBReader=True):
self.cleanModel()
stateConfigs = self.getTrainingStatesInDir(pathTrainDir)
if stateConfigs is None:
strError = 'Cant find Model saved state from directory [%s]' % pathTrainDir
self.printError(strError)
raise Exception(strError)
pathModelConfig = stateConfigs[0]
pathSolverState = stateConfigs[1]
pathModelWeight = stateConfigs[2]
self.loadModelFromTrainingState(pathModelConfig=pathModelConfig,
pathSolverState=pathSolverState,
pathModelWeight=pathModelWeight,
isLoadLMDBReader=isLoadLMDBReader)
def loadModelFromTaskModelDir(self, pathTaskDir):
pathConfigModel = os.path.join(pathTaskDir, CFG_MODEL_TRAIN)
pathConfigSolver = os.path.join(pathTaskDir, CFG_SOLVER)
self.loadModelFromTrainingState(pathModelConfig=pathConfigModel,
pathSolverState=pathConfigSolver)
self.outputDir = pathTaskDir
def loadModelFromTrainingState(self, pathModelConfig, pathSolverState,
pathModelWeight=None, pathLMDBDataset=None, isLoadLMDBReader=True):
"""
Load a Keras model from a trained state (when a path to model weights is given),
or from the initial config only.
:param pathModelConfig: path to Model Config in JSON format
:param pathSolverState: path to SolverState Config in JSON format
:param pathModelWeight: path to Model Weights as binary Keras dump
:param pathLMDBDataset: path to LMDB-Dataset, if None -> skip
:param isLoadLMDBReader: load or not LMDBReader from SolverState Config
:return: None
"""
self.cleanModel()
# (1) Load Model Config from Json:
with open(pathModelConfig, 'r') as fModelConfig:
tmpStr = fModelConfig.read()
self.model = keras.models.model_from_json(tmpStr)
if self.model is None:
strError = 'Invalid Model config in file [%s]' % pathModelConfig
self.printError(strError)
raise Exception(strError)
# (2) Load SoverState Config from Json:
with open(pathSolverState) as fSolverState:
tmpStr = fSolverState.read()
configSolverState = json.loads(tmpStr)
if configSolverState is None:
strError = 'Invalid SolverState config in file [%s]' % pathSolverState
self.printError(strError)
raise Exception(strError)
if pathLMDBDataset is not None:
configSolverState['dataset-id'] = pathLMDBDataset
# (3) Load Model Weights:
if pathModelWeight is not None:
self.model.load_weights(pathModelWeight)
# (4) Reconfigure Model State:
self.intervalSaveModel = configSolverState['intervalSaveModel']
self.intervalValidation = configSolverState['intervalValidation']
self.numEpoch = configSolverState['numEpoch']
self.currentIter = configSolverState['currentIter']
self.sizeBatch = configSolverState['sizeBatch']
self.modelPrefix = configSolverState['modelPrefix']
if 'modelName' in configSolverState.keys():
self.modelName = configSolverState['modelName']
if 'deviceType' in configSolverState.keys():
self.deviceType = configSolverState['deviceType']
if isLoadLMDBReader:
self.loadBatcherLMDB(configSolverState['dataset-id'], self.sizeBatch)
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.currentEpoch = np.floor(self.currentIter / self.numIterPerEpoch)
else:
self.numIterPerEpoch = 1
self.currentEpoch = 0
self.pathModelConfig = pathModelConfig
# (5) Configure Loss, Solver, Metrics and compile model
tmpCfgOptimizer = configSolverState['optimizer'].copy()
parOptimizer = keras.optimizers.get(tmpCfgOptimizer)
parLoss = configSolverState['loss']
# parMetrics = configSolverState['metrics']
#TODO: I think this is a bug or a design flaw in Keras: 'loss' is not a valid metric name, so this is a temporary fix
parMetrics = []
if 'acc' in configSolverState['metrics']:
parMetrics.append('accuracy')
self.model.compile(optimizer=parOptimizer, loss=parLoss, metrics=parMetrics)
def runTrain(self, paramNumEpoch=-1):
if not self.isOk():
strErr = 'KerasTrainer is not correctly initialized'
self.printError(strErr)
raise Exception(strErr)
if paramNumEpoch>0:
self.numEpoch = paramNumEpoch
for ei in xrange(self.numEpoch):
self.trainOneEpoch()
if (ei%self.intervalSaveModel)==0:
self.saveModelState()
if (ei%self.intervalValidation)==0:
pass
#########################
if __name__ == '__main__':
pass | mit |
qingshuimonk/STA663 | ae.py | 1 | 4091 | # -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import matplotlib
# matplotlib.use('PS')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 8
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
# Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
# Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
# with tf.Session() as sess:
sess = tf.Session()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
print("Optimization Finished!")
encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
plt.figure(figsize=(8, 2))
for i in range(8):
ax = plt.subplot(2, 8, i+1)
plt.imshow(mnist.test.images[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
ax = plt.subplot(2, 8, 8+i+1)
plt.imshow(encode_decode[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.show() | mit |
carlthome/librosa | librosa/display.py | 1 | 30778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Display
=======
.. autosummary::
:toctree: generated/
specshow
waveplot
cmap
TimeFormatter
NoteFormatter
LogHzFormatter
ChromaFormatter
TonnetzFormatter
"""
import warnings
import numpy as np
from matplotlib.cm import get_cmap
from matplotlib.axes import Axes
from matplotlib.ticker import Formatter, ScalarFormatter
from matplotlib.ticker import LogLocator, FixedLocator, MaxNLocator
from matplotlib.ticker import SymmetricalLogLocator
from . import core
from . import util
from .util.exceptions import ParameterError
__all__ = ['specshow',
'waveplot',
'cmap',
'TimeFormatter',
'NoteFormatter',
'LogHzFormatter',
'ChromaFormatter',
'TonnetzFormatter']
class TimeFormatter(Formatter):
'''A tick formatter for time axes.
Automatically switches between seconds, minutes:seconds,
or hours:minutes:seconds.
Parameters
----------
lag : bool
If `True`, then the time axis is interpreted in lag coordinates.
Anything past the midpoint will be converted to negative time.
unit : str or None
Abbreviation of the physical unit for axis labels and ticks.
Either equal to `s` (seconds) or `ms` (milliseconds) or None (default).
If set to None, the resulting TimeFormatter object adapts its string
representation to the duration of the underlying time range:
`hh:mm:ss` above 3600 seconds; `mm:ss` between 60 and 3600 seconds;
and `ss` below 60 seconds.
See also
--------
matplotlib.ticker.Formatter
Examples
--------
For normal time
>>> import matplotlib.pyplot as plt
>>> times = np.arange(30)
>>> values = np.random.randn(len(times))
>>> plt.figure()
>>> ax = plt.gca()
>>> ax.plot(times, values)
>>> ax.xaxis.set_major_formatter(librosa.display.TimeFormatter())
>>> ax.set_xlabel('Time')
>>> plt.show()
Manually set the physical time unit of the x-axis to milliseconds
>>> times = np.arange(100)
>>> values = np.random.randn(len(times))
>>> plt.figure()
>>> ax = plt.gca()
>>> ax.plot(times, values)
>>> ax.xaxis.set_major_formatter(librosa.display.TimeFormatter(unit='ms'))
>>> ax.set_xlabel('Time (ms)')
For lag plots
>>> times = np.arange(60)
>>> values = np.random.randn(len(times))
>>> plt.figure()
>>> ax = plt.gca()
>>> ax.plot(times, values)
>>> ax.xaxis.set_major_formatter(librosa.display.TimeFormatter(lag=True))
>>> ax.set_xlabel('Lag')
'''
def __init__(self, lag=False, unit=None):
if unit not in ['s', 'ms', None]:
raise ParameterError('Unknown time unit: {}'.format(unit))
self.unit = unit
self.lag = lag
def __call__(self, x, pos=None):
'''Return the time format as pos'''
_, dmax = self.axis.get_data_interval()
vmin, vmax = self.axis.get_view_interval()
# In lag-time axes, anything greater than dmax / 2 is negative time
if self.lag and x >= dmax * 0.5:
# In lag mode, don't tick past the limits of the data
if x > dmax:
return ''
value = np.abs(x - dmax)
# Do we need to tweak vmin/vmax here?
sign = '-'
else:
value = x
sign = ''
if self.unit == 's':
s = '{:.3g}'.format(value)
elif self.unit == 'ms':
s = '{:.3g}'.format(value * 1000)
else:
if vmax - vmin > 3600:
s = '{:d}:{:02d}:{:02d}'.format(int(value / 3600.0),
int(np.mod(value / 60.0, 60)),
int(np.mod(value, 60)))
elif vmax - vmin > 60:
s = '{:d}:{:02d}'.format(int(value / 60.0),
int(np.mod(value, 60)))
else:
s = '{:.2g}'.format(value)
return '{:s}{:s}'.format(sign, s)
class NoteFormatter(Formatter):
'''Ticker formatter for Notes
Parameters
----------
octave : bool
If `True`, display the octave number along with the note name.
Otherwise, only show the note name (and cent deviation)
major : bool
If `True`, ticks are always labeled.
If `False`, ticks are only labeled if the span is less than 2 octaves
See also
--------
LogHzFormatter
matplotlib.ticker.Formatter
Examples
--------
>>> import matplotlib.pyplot as plt
>>> values = librosa.midi_to_hz(np.arange(48, 72))
>>> plt.figure()
>>> ax1 = plt.subplot(2,1,1)
>>> ax1.bar(np.arange(len(values)), values)
>>> ax1.set_ylabel('Hz')
>>> ax2 = plt.subplot(2,1,2)
>>> ax2.bar(np.arange(len(values)), values)
>>> ax2.yaxis.set_major_formatter(librosa.display.NoteFormatter())
>>> ax2.set_ylabel('Note')
>>> plt.show()
'''
def __init__(self, octave=True, major=True):
self.octave = octave
self.major = major
def __call__(self, x, pos=None):
if x <= 0:
return ''
# Only use cent precision if our vspan is less than an octave
vmin, vmax = self.axis.get_view_interval()
if not self.major and vmax > 4 * max(1, vmin):
return ''
cents = vmax <= 2 * max(1, vmin)
return core.hz_to_note(int(x), octave=self.octave, cents=cents)
class LogHzFormatter(Formatter):
'''Ticker formatter for logarithmic frequency
Parameters
----------
major : bool
If `True`, ticks are always labeled.
If `False`, ticks are only labeled if the span is less than 2 octaves
See also
--------
NoteFormatter
matplotlib.ticker.Formatter
Examples
--------
>>> import matplotlib.pyplot as plt
>>> values = librosa.midi_to_hz(np.arange(48, 72))
>>> plt.figure()
>>> ax1 = plt.subplot(2,1,1)
>>> ax1.bar(np.arange(len(values)), values)
>>> ax1.yaxis.set_major_formatter(librosa.display.LogHzFormatter())
>>> ax1.set_ylabel('Hz')
>>> ax2 = plt.subplot(2,1,2)
>>> ax2.bar(np.arange(len(values)), values)
>>> ax2.yaxis.set_major_formatter(librosa.display.NoteFormatter())
>>> ax2.set_ylabel('Note')
>>> plt.show()
'''
def __init__(self, major=True):
self.major = major
def __call__(self, x, pos=None):
if x <= 0:
return ''
vmin, vmax = self.axis.get_view_interval()
if not self.major and vmax > 4 * max(1, vmin):
return ''
return '{:g}'.format(x)
class ChromaFormatter(Formatter):
'''A formatter for chroma axes
See also
--------
matplotlib.ticker.Formatter
Examples
--------
>>> import matplotlib.pyplot as plt
>>> values = np.arange(12)
>>> plt.figure()
>>> ax = plt.gca()
>>> ax.plot(values)
>>> ax.yaxis.set_major_formatter(librosa.display.ChromaFormatter())
>>> ax.set_ylabel('Pitch class')
>>> plt.show()
'''
def __call__(self, x, pos=None):
'''Format for chroma positions'''
return core.midi_to_note(int(x), octave=False, cents=False)
class TonnetzFormatter(Formatter):
'''A formatter for tonnetz axes
See also
--------
matplotlib.ticker.Formatter
Examples
--------
>>> import matplotlib.pyplot as plt
>>> values = np.arange(6)
>>> plt.figure()
>>> ax = plt.gca()
>>> ax.plot(values)
>>> ax.yaxis.set_major_formatter(librosa.display.TonnetzFormatter())
>>> ax.set_ylabel('Tonnetz')
>>> plt.show()
'''
def __call__(self, x, pos=None):
'''Format for tonnetz positions'''
return [r'5$_x$', r'5$_y$', r'm3$_x$',
r'm3$_y$', r'M3$_x$', r'M3$_y$'][int(x)]
def cmap(data, robust=True, cmap_seq='magma', cmap_bool='gray_r', cmap_div='coolwarm'):
'''Get a default colormap from the given data.
If the data is boolean, use a black and white colormap.
If the data has both positive and negative values,
use a diverging colormap.
Otherwise, use a sequential colormap.
Parameters
----------
data : np.ndarray
Input data
robust : bool
If True, discard the top and bottom 2% of data when calculating
range.
cmap_seq : str
The sequential colormap name
cmap_bool : str
The boolean colormap name
cmap_div : str
The diverging colormap name
Returns
-------
cmap : matplotlib.colors.Colormap
The colormap to use for `data`
See Also
--------
matplotlib.pyplot.colormaps
'''
data = np.atleast_1d(data)
if data.dtype == 'bool':
return get_cmap(cmap_bool)
data = data[np.isfinite(data)]
if robust:
min_p, max_p = 2, 98
else:
min_p, max_p = 0, 100
max_val = np.percentile(data, max_p)
min_val = np.percentile(data, min_p)
if min_val >= 0 or max_val <= 0:
return get_cmap(cmap_seq)
return get_cmap(cmap_div)
def __envelope(x, hop):
'''Compute the max-envelope of non-overlapping frames of x at length hop
x is assumed to be multi-channel, of shape (n_channels, n_samples).
'''
x_frame = np.abs(util.frame(x, frame_length=hop, hop_length=hop))
return x_frame.max(axis=1)
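# Rough shape sketch (assumed values, not from the original source), assuming util.frame
# frames along the last axis: for a stereo signal x of shape (2, 22050) and hop=512, the
# framed array has shape (2, 512, n_frames) and the returned envelope has shape
# (2, n_frames), i.e. one peak value per channel and non-overlapping frame.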
def waveplot(y, sr=22050, max_points=5e4, x_axis='time', offset=0.0,
max_sr=1000, ax=None, **kwargs):
'''Plot the amplitude envelope of a waveform.
If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.
If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,
so that the left and right channels are drawn above and below the axis,
respectively.
Long signals (`duration >= max_points`) are down-sampled to at
most `max_sr` before plotting.
Parameters
----------
y : np.ndarray [shape=(n,) or (2,n)]
audio time series (mono or stereo)
sr : number > 0 [scalar]
sampling rate of `y`
max_points : positive number or None
Maximum number of time-points to plot: if the duration of `y`
exceeds `max_points`, then `y` is downsampled.
If `None`, no downsampling is performed.
x_axis : str or None
Display of the x-axis ticks and tick markers. Accepted values are:
- 'time' : markers are shown as milliseconds, seconds, minutes, or hours.
Values are plotted in units of seconds.
- 's' : markers are shown as seconds.
- 'ms' : markers are shown as milliseconds.
- 'lag' : like time, but past the halfway point counts as negative values.
- 'lag_s' : same as lag, but in seconds.
- 'lag_ms' : same as lag, but in milliseconds.
- `None`, 'none', or 'off': ticks and tick markers are hidden.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
offset : float
Horizontal offset (in seconds) to start the waveform plot
max_sr : number > 0 [scalar]
Maximum sampling rate for the visualization
kwargs
Additional keyword arguments to `matplotlib.pyplot.fill_between`
Returns
-------
pc : matplotlib.collections.PolyCollection
The PolyCollection created by `fill_between`.
See also
--------
librosa.core.resample
matplotlib.pyplot.fill_between
Examples
--------
Plot a monophonic waveform
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Monophonic')
Or a stereo waveform
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... mono=False, duration=10)
>>> plt.subplot(3, 1, 2)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Stereo')
Or harmonic and percussive components with transparency
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> y_harm, y_perc = librosa.effects.hpss(y)
>>> plt.subplot(3, 1, 3)
>>> librosa.display.waveplot(y_harm, sr=sr, alpha=0.25)
>>> librosa.display.waveplot(y_perc, sr=sr, color='r', alpha=0.5)
>>> plt.title('Harmonic + Percussive')
>>> plt.tight_layout()
>>> plt.show()
'''
util.valid_audio(y, mono=False)
if not (isinstance(max_sr, int) and max_sr > 0):
raise ParameterError('max_sr must be a positive integer')
target_sr = sr
hop_length = 1
# Pad an extra channel dimension, if necessary
if y.ndim == 1:
y = y[np.newaxis, :]
if max_points is not None:
if max_points <= 0:
raise ParameterError('max_points must be strictly positive')
if max_points < y.shape[-1]:
target_sr = min(max_sr, (sr * y.shape[-1]) // max_points)
hop_length = sr // target_sr
# Reduce by envelope calculation
y = __envelope(y, hop_length)
y_top = y[0]
y_bottom = -y[-1]
axes = __check_axes(ax)
kwargs.setdefault('color', next(axes._get_lines.prop_cycler)['color'])
locs = offset + core.times_like(y_top, sr=sr, hop_length=hop_length)
out = axes.fill_between(locs, y_bottom, y_top, **kwargs)
axes.set_xlim([locs.min(), locs.max()])
# Construct tickers and locators
__decorate_axis(axes.xaxis, x_axis)
return out
def specshow(data, x_coords=None, y_coords=None,
x_axis=None, y_axis=None,
sr=22050, hop_length=512,
fmin=None, fmax=None,
tuning=0.0,
bins_per_octave=12,
ax=None,
**kwargs):
'''Display a spectrogram/chromagram/cqt/etc.
Parameters
----------
data : np.ndarray [shape=(d, n)]
Matrix to display (e.g., spectrogram)
sr : number > 0 [scalar]
Sample rate used to determine time scale in x-axis.
hop_length : int > 0 [scalar]
Hop length, also used to determine time scale in x-axis
x_axis : None or str
y_axis : None or str
Range for the x- and y-axes.
Valid types are:
- None, 'none', or 'off' : no axis decoration is displayed.
Frequency types:
- 'linear', 'fft', 'hz' : frequency range is determined by
the FFT window and sampling rate.
- 'log' : the spectrum is displayed on a log scale.
- 'mel' : frequencies are determined by the mel scale.
- 'cqt_hz' : frequencies are determined by the CQT scale.
- 'cqt_note' : pitches are determined by the CQT scale.
All frequency types are plotted in units of Hz.
Categorical types:
- 'chroma' : pitches are determined by the chroma filters.
Pitch classes are arranged at integer locations (0-11).
- 'tonnetz' : axes are labeled by Tonnetz dimensions (0-5)
- 'frames' : markers are shown as frame counts.
Time types:
- 'time' : markers are shown as milliseconds, seconds, minutes, or hours.
Values are plotted in units of seconds.
- 's' : markers are shown as seconds.
- 'ms' : markers are shown as milliseconds.
- 'lag' : like time, but past the halfway point counts as negative values.
- 'lag_s' : same as lag, but in seconds.
- 'lag_ms' : same as lag, but in milliseconds.
Rhythm:
- 'tempo' : markers are shown as beats-per-minute (BPM)
using a logarithmic scale. This is useful for
visualizing the outputs of `feature.tempogram`.
- 'fourier_tempo' : same as `'tempo'`, but used when
tempograms are calculated in the Frequency domain
using `feature.fourier_tempogram`.
x_coords : np.ndarray [shape=data.shape[1]+1]
y_coords : np.ndarray [shape=data.shape[0]+1]
Optional positioning coordinates of the input data.
These can be use to explicitly set the location of each
element `data[i, j]`, e.g., for displaying beat-synchronous
features in natural time coordinates.
If not provided, they are inferred from `x_axis` and `y_axis`.
fmin : float > 0 [scalar] or None
Frequency of the lowest spectrogram bin. Used for Mel and CQT
scales.
If `y_axis` is `cqt_hz` or `cqt_note` and `fmin` is not given,
it is set by default to `note_to_hz('C1')`.
fmax : float > 0 [scalar] or None
Used for setting the Mel frequency scales
tuning : float
Tuning deviation from A440, in fractions of a bin.
This is used for CQT frequency scales, so that `fmin` is adjusted
to `fmin * 2**(tuning / bins_per_octave)`.
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Used for CQT frequency scale.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
kwargs : additional keyword arguments
Arguments passed through to `matplotlib.pyplot.pcolormesh`.
By default, the following options are set:
- `rasterized=True`
- `shading='flat'`
- `edgecolors='None'`
Returns
-------
axes
The axis handle for the figure.
See Also
--------
cmap : Automatic colormap detection
matplotlib.pyplot.pcolormesh
Examples
--------
Visualize an STFT power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> plt.figure(figsize=(12, 8))
>>> D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
>>> plt.subplot(4, 2, 1)
>>> librosa.display.specshow(D, y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear-frequency power spectrogram')
Or on a logarithmic scale
>>> plt.subplot(4, 2, 2)
>>> librosa.display.specshow(D, y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-frequency power spectrogram')
Or use a CQT scale
>>> CQT = librosa.amplitude_to_db(np.abs(librosa.cqt(y, sr=sr)), ref=np.max)
>>> plt.subplot(4, 2, 3)
>>> librosa.display.specshow(CQT, y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (note)')
>>> plt.subplot(4, 2, 4)
>>> librosa.display.specshow(CQT, y_axis='cqt_hz')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (Hz)')
Draw a chromagram with pitch classes
>>> C = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> plt.subplot(4, 2, 5)
>>> librosa.display.specshow(C, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Chromagram')
Force a grayscale colormap (white -> black)
>>> plt.subplot(4, 2, 6)
>>> librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear power spectrogram (grayscale)')
Draw time markers automatically
>>> plt.subplot(4, 2, 7)
>>> librosa.display.specshow(D, x_axis='time', y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log power spectrogram')
Draw a tempogram with BPM markers
>>> plt.subplot(4, 2, 8)
>>> Tgram = librosa.feature.tempogram(y=y, sr=sr)
>>> librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
>>> plt.colorbar()
>>> plt.title('Tempogram')
>>> plt.tight_layout()
>>> plt.show()
Draw beat-synchronous chroma in natural time
>>> plt.figure()
>>> tempo, beat_f = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> beat_f = librosa.util.fix_frames(beat_f, x_max=C.shape[1])
>>> Csync = librosa.util.sync(C, beat_f, aggregate=np.median)
>>> beat_t = librosa.frames_to_time(beat_f, sr=sr)
>>> ax1 = plt.subplot(2,1,1)
>>> librosa.display.specshow(C, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (linear time)')
>>> ax2 = plt.subplot(2,1,2, sharex=ax1)
>>> librosa.display.specshow(Csync, y_axis='chroma', x_axis='time',
... x_coords=beat_t)
>>> plt.title('Chroma (beat time)')
>>> plt.tight_layout()
>>> plt.show()
'''
if np.issubdtype(data.dtype, np.complexfloating):
warnings.warn('Trying to display complex-valued input. '
'Showing magnitude instead.')
data = np.abs(data)
kwargs.setdefault('cmap', cmap(data))
kwargs.setdefault('rasterized', True)
kwargs.setdefault('edgecolors', 'None')
kwargs.setdefault('shading', 'flat')
all_params = dict(kwargs=kwargs,
sr=sr,
fmin=fmin,
fmax=fmax,
tuning=tuning,
bins_per_octave=bins_per_octave,
hop_length=hop_length)
# Get the x and y coordinates
y_coords = __mesh_coords(y_axis, y_coords, data.shape[0], **all_params)
x_coords = __mesh_coords(x_axis, x_coords, data.shape[1], **all_params)
axes = __check_axes(ax)
out = axes.pcolormesh(x_coords, y_coords, data, **kwargs)
__set_current_image(ax, out)
axes.set_xlim(x_coords.min(), x_coords.max())
axes.set_ylim(y_coords.min(), y_coords.max())
# Set up axis scaling
__scale_axes(axes, x_axis, 'x')
__scale_axes(axes, y_axis, 'y')
# Construct tickers and locators
__decorate_axis(axes.xaxis, x_axis)
__decorate_axis(axes.yaxis, y_axis)
return axes
def __set_current_image(ax, img):
'''Helper to set the current image in pyplot mode.
If the provided `ax` is not `None`, then we assume that the user is using the object API.
In this case, the pyplot current image is not set.
'''
if ax is None:
import matplotlib.pyplot as plt
plt.sci(img)
def __mesh_coords(ax_type, coords, n, **kwargs):
'''Compute axis coordinates'''
if coords is not None:
if len(coords) < n:
raise ParameterError('Coordinate shape mismatch: '
'{}<{}'.format(len(coords), n))
return coords
coord_map = {'linear': __coord_fft_hz,
'hz': __coord_fft_hz,
'log': __coord_fft_hz,
'mel': __coord_mel_hz,
'cqt': __coord_cqt_hz,
'cqt_hz': __coord_cqt_hz,
'cqt_note': __coord_cqt_hz,
'chroma': __coord_chroma,
'time': __coord_time,
's': __coord_time,
'ms': __coord_time,
'lag': __coord_time,
'lag_s': __coord_time,
'lag_ms': __coord_time,
'tonnetz': __coord_n,
'off': __coord_n,
'tempo': __coord_tempo,
'fourier_tempo': __coord_fourier_tempo,
'frames': __coord_n,
None: __coord_n}
if ax_type not in coord_map:
raise ParameterError('Unknown axis type: {}'.format(ax_type))
return coord_map[ax_type](n, **kwargs)
def __check_axes(axes):
'''Check if "axes" is an instance of an axis object. If not, use `gca`.'''
if axes is None:
import matplotlib.pyplot as plt
axes = plt.gca()
elif not isinstance(axes, Axes):
raise ValueError("`axes` must be an instance of matplotlib.axes.Axes. "
"Found type(axes)={}".format(type(axes)))
return axes
def __scale_axes(axes, ax_type, which):
'''Set the axis scaling'''
kwargs = dict()
if which == 'x':
thresh = 'linthreshx'
base = 'basex'
scale = 'linscalex'
scaler = axes.set_xscale
limit = axes.set_xlim
else:
thresh = 'linthreshy'
base = 'basey'
scale = 'linscaley'
scaler = axes.set_yscale
limit = axes.set_ylim
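# NOTE: these keyword names (basex/basey, linthreshx/linthreshy,
# linscalex/linscaley) follow the older matplotlib scale API; newer
# matplotlib releases (3.3+) expect the unsuffixed base/linthresh/linscale.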
# Map ticker scales
if ax_type == 'mel':
mode = 'symlog'
kwargs[thresh] = 1000.0
kwargs[base] = 2
elif ax_type == 'log':
mode = 'symlog'
kwargs[base] = 2
kwargs[thresh] = core.note_to_hz('C2')
kwargs[scale] = 0.5
elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
mode = 'log'
kwargs[base] = 2
elif ax_type in ['tempo', 'fourier_tempo']:
mode = 'log'
kwargs[base] = 2
limit(16, 480)
else:
return
scaler(mode, **kwargs)
def __decorate_axis(axis, ax_type):
'''Configure axis tickers, locators, and labels'''
if ax_type == 'tonnetz':
axis.set_major_formatter(TonnetzFormatter())
axis.set_major_locator(FixedLocator(0.5 + np.arange(6)))
axis.set_label_text('Tonnetz')
elif ax_type == 'chroma':
axis.set_major_formatter(ChromaFormatter())
axis.set_major_locator(FixedLocator(0.5 +
np.add.outer(12 * np.arange(10),
[0, 2, 4, 5, 7, 9, 11]).ravel()))
axis.set_label_text('Pitch class')
elif ax_type in ['tempo', 'fourier_tempo']:
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_label_text('BPM')
elif ax_type == 'time':
axis.set_major_formatter(TimeFormatter(unit=None, lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time')
elif ax_type == 's':
axis.set_major_formatter(TimeFormatter(unit='s', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (s)')
elif ax_type == 'ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (ms)')
elif ax_type == 'lag':
axis.set_major_formatter(TimeFormatter(unit=None, lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag')
elif ax_type == 'lag_s':
axis.set_major_formatter(TimeFormatter(unit='s', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (s)')
elif ax_type == 'lag_ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (ms)')
elif ax_type == 'cqt_note':
axis.set_major_formatter(NoteFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(NoteFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Note')
elif ax_type in ['cqt_hz']:
axis.set_major_formatter(LogHzFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(LogHzFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Hz')
elif ax_type in ['mel', 'log']:
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(SymmetricalLogLocator(axis.get_transform()))
axis.set_label_text('Hz')
elif ax_type in ['linear', 'hz']:
axis.set_major_formatter(ScalarFormatter())
axis.set_label_text('Hz')
elif ax_type in ['frames']:
axis.set_label_text('Frames')
elif ax_type in ['off', 'none', None]:
axis.set_label_text('')
axis.set_ticks([])
def __coord_fft_hz(n, sr=22050, **_kwargs):
'''Get the frequencies for FFT bins'''
n_fft = 2 * (n - 1)
# The following code centers the FFT bins at their frequencies
# and clips to the non-negative frequency range [0, nyquist]
basis = core.fft_frequencies(sr=sr, n_fft=n_fft)
fmax = basis[-1]
basis -= 0.5 * (basis[1] - basis[0])
basis = np.append(np.maximum(0, basis), [fmax])
return basis
def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
'''Get the frequencies for Mel bins'''
if fmin is None:
fmin = 0
if fmax is None:
fmax = 11025.0
basis = core.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
return basis
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, **_kwargs):
'''Get CQT bin frequencies'''
if fmin is None:
fmin = core.note_to_hz('C1')
# Apply tuning correction
fmin = fmin * 2.0**(_kwargs.get('tuning', 0.0) / bins_per_octave)
# we drop by half a bin so that CQT bins are centered vertically
return core.cqt_frequencies(n+1,
fmin=fmin / 2.0**(0.5/bins_per_octave),
bins_per_octave=bins_per_octave)
def __coord_chroma(n, bins_per_octave=12, **_kwargs):
'''Get chroma bin numbers'''
return np.linspace(0, (12.0 * n) / bins_per_octave, num=n+1, endpoint=True)
def __coord_tempo(n, sr=22050, hop_length=512, **_kwargs):
'''Tempo coordinates'''
basis = core.tempo_frequencies(n+2, sr=sr, hop_length=hop_length)[1:]
edges = np.arange(1, n+2)
return basis * (edges + 0.5) / edges
def __coord_fourier_tempo(n, sr=22050, hop_length=512, **_kwargs):
'''Fourier tempogram coordinates'''
n_fft = 2 * (n - 1)
# The following code centers the FFT bins at their frequencies
# and clips to the non-negative frequency range [0, nyquist]
basis = core.fourier_tempo_frequencies(sr=sr,
hop_length=hop_length,
win_length=n_fft)
fmax = basis[-1]
basis -= 0.5 * (basis[1] - basis[0])
basis = np.append(np.maximum(0, basis), [fmax])
return basis
def __coord_n(n, **_kwargs):
'''Get bare positions'''
return np.arange(n+1)
def __coord_time(n, sr=22050, hop_length=512, **_kwargs):
'''Get time coordinates from frames'''
return core.frames_to_time(np.arange(n+1), sr=sr, hop_length=hop_length)
| isc |
planetceres/bitcoin-nn | utils/cleaner.py | 1 | 6567 | import pandas as pd
import math
import time
import numpy as np
import matplotlib.pyplot as plt  # needed by plot_results() at the bottom of this module
'''Find and replace NaN values'''
def est_nan(data, target_feature, reference_feature):
plotting = False # Show plots for data estimation where missing values were found
# Max number of values to use for ratio
tail_n = 100
# make sure there are values for first and last rows
if (pd.isnull(data[target_feature].iloc[-1])):
print('NaN values at end of data with length: ' + str(len(data)))
trim_at = data[target_feature].iloc[:(len(data) - 1)].last_valid_index()
row_drop_num = len(data) - trim_at
print('Dropping %d rows' % row_drop_num)
data = data.drop(data.index[trim_at: -1])
print('New length of dataset: ' + str(len(data)))
if (pd.isnull(data[target_feature].iloc[0])):
print('NaN values at beginning of data with length: ' + str(len(data)))
trim_at = data[target_feature].iloc[0:].first_valid_index()
row_drop_num = trim_at
print('Dropping %d rows' % row_drop_num)
data = data.drop(data.index[0: trim_at])
print('New length of dataset: ' + str(len(data)))
# find indexes of NaNs in A and B columns and create arrays
nanindex = data.index[data[target_feature].apply(np.isnan)]
valIndex = data.index[data[target_feature].apply(np.isfinite)]
valAIndex = data.index[data[reference_feature].apply(np.isfinite)]
dualIndex = data.index[data[target_feature].apply(np.isfinite) & data[reference_feature].apply(np.isfinite)]
df_index = data.index.values.tolist()
nindex = [df_index.index(i) for i in nanindex]
# valArray = [df_index.index(i) for i in valIndex]
# bcRatio set as 1, unless using Coindesk values to fill in NaNs
try:
# sum the last 100 values (~2 hours) of ticker data to get the conversion rate
bcRatio = (
sum(data[target_feature].ix[dualIndex].tail(tail_n)) / sum(data[reference_feature].ix[dualIndex].tail(tail_n)))
except:
bcRatio = 1
# Find nearest value function
def find_nearest(array, value):
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])):
return array[idx - 1]
else:
return array[idx]
nanStart = 0
nanEnd = 0
prevNanIndex = -1
for n in range(len(nindex)):
# Indices of NaN array
n_i_1t = (nindex[n] - 1)
n_i_t = nindex[n]
n_i_t1 = (nindex[n] + 1)
# Values of NaN Array
n_v_1t = data.ix[n_i_1t][reference_feature]
# If the last value in the data array is NaN
# and the next value is not NaN
if (prevNanIndex == n_i_1t) & (n_i_t1 not in nindex):
# The NaN Series ends with the next non NaN index
nanEnd = n_i_t1
placeholder = float(data.loc[nanStart, target_feature])
# The number of NaN values in the series
nanDiff = nanEnd - (nanStart + 1)
# The averaged difference in values between start of NaN series and end of NaN Series
diff = (data.ix[nanEnd][target_feature] - data.ix[nanStart][target_feature]) / (nanDiff + 1)
# For each NaN in series, replace with scaled value
for i in range(nanDiff):
# Local index of NaN series
r = i + 1
# Global index of the dataframe
row_index = nanStart + r
# Find the nearest value to serve as reference
nearestA = find_nearest(valAIndex, (row_index))
nearestB = find_nearest(valIndex, (row_index))
nnA = abs(nearestA - row_index)
nnB = abs(nearestB - row_index)
if (nnB <= nnA):
# Increment by the averaged difference
increment = r * diff
estimated = (placeholder + increment)
data.loc[row_index, target_feature] = estimated
else:
# If A is closer use the conversion rate to port over values
placeholderA = data.loc[nearestA, reference_feature]
estimated = placeholderA * float(bcRatio)
data.loc[row_index, target_feature] = estimated
# Reset Series Variables
nanStart = 0
nanEnd = 0
prevNanIndex = -1
# If the last value was NaN and so is the next
elif (prevNanIndex == n_i_1t) & (n_i_t1 in nindex):
pass
# If the last value is not NaN, but the next is, mark the start index
elif (n_i_1t not in nindex) & (n_i_t1 in nindex):
nanStart = n_i_1t
# If only one NaN is found isolated, use the preceding and following values to fill it in
elif (n_i_1t not in nindex) & (n_i_t1 not in nindex):
nanDiff = n_i_t1 - (n_i_1t + 1)
placeholder = float(data.loc[n_i_1t, target_feature])
diff = (data.ix[n_i_t1][target_feature] - data.ix[n_i_1t][target_feature]) / float(nanDiff + 1)
row_index = n_i_t
estimated = (data.ix[n_i_1t][target_feature] + diff) * bcRatio
data.loc[row_index, target_feature] = estimated
# Reset Series Variables
nanStart = 0
nanEnd = 0
prevNanIndex = -1
else:
print("Error matching NaN series")
nanStart = n_i_1t
# Set the index of the last NaN to the current index
prevNanIndex = nindex[n]
if plotting == True:
# print(data)
plot_results(data.index, data[target_feature], data[reference_feature])
return data
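# Hypothetical usage sketch (the CSV name and column names below are placeholders,
# not taken from this repo): given a DataFrame with a gappy exchange-price column
# and a denser reference-price column, est_nan interpolates short gaps and ports
# values over from the reference series (scaled by bcRatio) when it is closer:
#
#   df = pd.read_csv('ticker.csv')
#   df = est_nan(df, 'Weighted_Price', 'coindesk_price')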
def replace_nans_noise(data, feature_columns):
for col in range(len(feature_columns)):
standard_deviation = data[feature_columns[col]].std(axis=0, skipna=True)
mean_data = data[feature_columns[col]].mean(axis=0, skipna=True)
data[feature_columns[col]] = [np.random.normal(mean_data, standard_deviation, 1)[0]
if pd.isnull(data[feature_columns[col]].iloc[row])
else data[feature_columns[col]].iloc[row]
for row in range(len(data))]
return data
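# replace_nans_noise is a simpler fallback: each remaining NaN in the listed
# feature columns is replaced with a single draw from a normal distribution
# using that column's mean and standard deviation (NaNs skipped when fitting).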
# Plot results
def plot_results(X_plot, A_plot, B_plot):
plt.plot(X_plot, A_plot, 'blue', alpha=0.5)
plt.plot(X_plot, B_plot, 'red', alpha=0.5)
plt.legend(loc='lower left')
plt.show() | mit |
thientu/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
mhue/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 16 | 12745 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
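# The expected counts follow the usual RANSAC bound
#   N = ceil(log(1 - p) / log(1 - (1 - e)**min_samples))
# e.g. e = 5%, min_samples = 2, p = 0.99:
#   log(0.01) / log(1 - 0.95**2) ~= 1.98 -> 2 trials (second case below).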
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
yousrabk/mne-python | examples/preprocessing/plot_eog_artifact_histogram.py | 11 | 1465 | """
========================
Show EOG artifact timing
========================
Compute the distribution of timing for EOG artifacts.
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
data = epochs.get_data()[:, pick_ch, :].astype(int)
data = np.sum((data.astype(int) & 512) == 512, axis=0)
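# The stim channel is a bit field: masking with 512 (the eog_event_id merged in
# above) flags the samples carrying a blink event, and summing over the epochs
# axis gives the number of trials containing a blink at each latency.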
###############################################################################
# Plot EOG artifact distribution
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
| bsd-3-clause |
lpsinger/astropy | astropy/visualization/wcsaxes/frame.py | 8 | 10649 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self.data = None
self.pixel = None
self.world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = value
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data[:,0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data[:,0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams['axes.linewidth']
self._color = rcParams['axes.edgecolor']
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor=rcParams['axes.facecolor'], edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = self.spine_class(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose()
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
class RectangularFrame1D(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'bt'
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000,
transform=self.parent_axes.transData)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
| bsd-3-clause |
glennq/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e., balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so that the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
ak681443/mana-deep | evaluation/allmods/ae_tries_mods/FindBestMatch.py | 1 | 4458 |
# coding: utf-8
# In[1]:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
# In[2]:
th = int(sys.argv[1])
v = int(sys.argv[2])
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = np.zeros((224,224))
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[img1>=th] = 0
masks = masks + img1
masks = masks / v
#img1[masks>20] = 0
#print np.average(masks)
#plt.imshow(img1)
# In[3]:
input_img = Input(shape=(224, 224,1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
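# With three 2x2 max-pooling stages the encoder maps each 224x224x1 sketch to a
# 28x28x8 code; all matching below is done on these codes rather than raw pixels.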
model.compile(loss='binary_crossentropy', optimizer='adagrad', verbose=0)
# In[4]:
model.load_weights(sys.argv[3], by_name=True)
# In[5]:
def push_pqueue(queue, priority, value):
if len(queue)>10:
heapq.heappushpop(queue, (priority, value))
else:
heapq.heappush(queue, (priority, value))
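# push_pqueue maintains a bounded min-heap of (score, filename) pairs, evicting
# the current worst score once the bound is exceeded (the strict `> 10` means the
# heap settles at 11 entries, which nlargest() below simply iterates over).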
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[masks>60] = 0
img1[img1>=th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test), 224, 224, 1))
X_test_pred = model.predict(X_test, verbose=0)
# In[8]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[masks>60] = 0
img1[img1>=th] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), 224, 224, 1))
X_train_pred = model.predict(X_train, verbose=0)
# In[9]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
top10_correct = 0
top5_correct = 0
top1_correct = 0
for i in np.arange(0, len(files1)):
filen1 = files1[i]
pred = X_test_pred[i]
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_eval/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
masks = np.zeros((224,224))
max_confidence = 0.0
max_file = None
pqueue = []
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j]
score = 1 - spatial.distance.cosine(tpred.sum(axis=2).flatten(), pred.sum(axis=2).flatten())
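# sum(axis=2) pools the 8 feature maps of each code into a single 28x28 map;
# 1 - cosine distance of the flattened maps is the cosine similarity, so a
# score of 1.0 means the two encodings point in the same direction.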
push_pqueue(pqueue, score, filen)
if max_confidence < score:
max_confidence = score
max_file = filen
i = 0
for top20 in heapq.nlargest(len(pqueue), pqueue):
i += 1
if top20[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
if i>5:
top10_correct+=1
elif i>1:
top10_correct+=1
top5_correct+=1
else:  # i == 1: the top-ranked match is the correct manatee
top10_correct+=1
top5_correct+=1
top1_correct+=1
break
print "\n!@#$", top10_correct/float(len(files1)), top5_correct/float(len(files1)), top1_correct,"\n"
| apache-2.0 |
reflectometry/osrefl | doc/conf.py | 1 | 7685 | # -*- coding: utf-8 -*-
#
# OsRefl documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 08 11:18:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Put the build lib on the start of the path.
from distutils.util import get_platform
platform = '.%s-%s'%(get_platform(),sys.version[:3])
build_lib = os.path.abspath('../build/lib'+platform)
sys.path.insert(0, build_lib)
sys.path.append(os.path.abspath('_extensions'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.coverage', 'sphinx.ext.pngmath',
#'only_directives',
#'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
#'inheritance_diagram',
#'dollarmath',
]
html_theme = 'default'
html_theme_options = {
"rightsidebar":"False",
"relbarbgcolor": "Gainsboro",
#"codebgcolor": "Beige",
"footertextcolor": "orange",
"relbartextcolor": "Blue",
"sidebarbgcolor": "white",
"sidebarlinkcolor": "OrangeRed",
"sidebartextcolor": "Navy",
"linkcolor": "Blue",
"relbarlinkcolor": "Blue"
#"headtextcolor": "Teal"
#"headbgcolor": "Oldlace",
}
html_style = 'site.css'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Off-specular Modeling Software'
copyright = u'2010, Christopher Metting'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
from osrefl import __version__ as release
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'osrefl'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OsRefl.tex', u'OsRefl Documentation',
u'Christopher Metting', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause |
didmar/jrl | plot_perf.py | 1 | 2175 | #!/usr/bin/python
# Plot the performances from a file with the following format :
# |# first commented line
# |# ...
# |# last commented line
# |total_nb_epi_aft_iter1 total_nb_epi_aft_iter2 ... total_nb_epi_aft_iterM
# |perf_trial1_iter1 perf_trial1_iter2 ... perf_trial1_iterM
# |perf_trial2_iter1 perf_trial2_iter2 ... perf_trial2_iterM
# |... ... ...
# |perf_trialN_iter1 perf_trialN_iter2 ... perf_trialN_iterM
#
# See for example jrl.examples.BenchmarkGARNET
# TODO show the std dev
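# For reference, a minimal input file (hypothetical numbers: two trials, three
# iterations) would contain the following four lines:
#     # GARNET benchmark run
#     10 20 30
#     -45.2 -30.1 -22.7
#     -44.8 -29.5 -23.0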
from numpy import *
from numpy.random import rand
from numpy.linalg import norm
from matplotlib import *
from matplotlib.pyplot import *
from numpy.core.fromnumeric import mean
nPerfFiles = (len(sys.argv)-1)/2
if len(sys.argv) < 3 or ((len(sys.argv)-1)%2) != 0:
print "Usage : plot_perf.py perf_title1 perf_filename1 [perf_title2 perf_filename2 ...]"
else:
xlogscale = True # TODO should be an argument of the script
fig = figure()
if xlogscale:
subplot(111, xscale="log")
xlabel("Episodes")
ylabel("Performance")
legends = []
for k in xrange(nPerfFiles):
#print "- ",sys.argv[1+k*2]
legends.append(sys.argv[1+k*2])
lines = open(sys.argv[2+k*2], 'r').readlines()
# Remove commented lines
while lines[0][0] == '#':
del lines[0]
# Then, the first line contains the total number of episodes
# after each iteration
strNEpis = lines.pop(0).split(" ")
nIters = len(strNEpis)
nEpis = empty(nIters)
for i in xrange(nIters):
nEpis[i] = float(strNEpis[i])
# Now we can go through the perf of each trial and each iteration
nTrials = len(lines)
perf = empty((nTrials,nIters))
for i in xrange(nTrials):
sp = lines[i].split(" ")
for j in xrange(nIters):
perf[i,j] = float(sp[j])
#plot(nEpis,mean(perf, axis=0))
errorbar(nEpis,mean(perf, axis=0), yerr=std(perf, axis=0), label=sys.argv[1+k*2])
#legend(legends,loc='best')
legend(loc='best')
show()
| gpl-2.0 |
MechCoder/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
timestocome/Test-stock-prediction-algorithms | StockMarketMovingAverage/MovingAvgLongShort.py | 1 | 2657 | # http://github.com/timestocome
# How well you do depends on which phase of the market you start in.
# You also need to invest a large amount to cover commissions.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# variables to adjust
short_ma = 42 # ~ 21 days per trading month
long_ma = 252 # ~ 252 trading days per year
threshold = 50 # point difference
start_year = 1985
# file names for data
djia_file = 'data/djia.csv'
nasdaq_file = 'data/nasdaq.csv'
sp_file = 'data/S&P.csv'
russell_file = 'data/Russell2000.csv'
gold_file = 'data/GOLD.csv'
# read in file
def read_data(file_name):
stock = pd.read_csv(file_name, parse_dates=True, index_col=0) # 31747 days of data
n_samples = len(stock)
# flip order from newest to oldest to oldest to newest
stock = stock.iloc[::-1]
return stock
# read in data
stock = read_data(sp_file)
# moving average
stock['short'] = np.round(stock['Close'].rolling(window=short_ma).mean(), 2)
stock['long'] = np.round(stock['Close'].rolling(window=long_ma).mean(), 2)
# plot
#stock[['Close', 'short', 'long']].plot(grid=True)
#plt.show()
###########################################################################
# test buy long, wait, sell short strategies
###########################################################################
stock['diff'] = stock['short'] - stock['long']
stock['plan'] = np.where(stock['diff'] > threshold, 1, 0)
stock['plan'] = np.where(stock['diff'] < -threshold, -1, stock['plan'])
# ditch old data
stock["Year"] = pd.DatetimeIndex(stock.index).year
stock = stock[stock['Year'] > start_year]
# plot plan
#stock['plan'].plot(linewidth=2)
#plt.ylim([-1.1, 1.1])
#plt.show()
# test plan
# earn the market return when invested (+/-1), earn 0 when sitting on cash
# buy in when short term is more than threshold above long
# sell when long is more than threshold above short
# use returns instead of prices to normalize data
# use log to simplify the math and avoid skew ( http://www.dcfnerds.com/94/arithmetic-vs-logarithmic-rates-of-return/)
#stock['returns'] = np.log(stock['Close'].shift(-1) / stock['Close']) # daily difference in price
stock['returns'] = np.log(stock['Close'] / stock['Close'].shift(1)) # daily difference in price
stock['strategy'] = stock['plan'].shift(1) * stock['returns']
# don't forget the commissions
stock['fees'] = np.where(stock['strategy'] == 0, 0, 1)
stock['fees'] = stock['fees'] * 7.
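# Added sketch: the plot below exponentiates cumulative log returns into a growth
# multiple; the same figure can be summarised directly. The $10,000 stake used
# here is an illustrative assumption, not part of the original script.
stake = 10000.
strategy_multiple = np.exp(stock['strategy'].sum())
print('Strategy growth multiple: %.3f, net of $%.0f commissions on a $%.0f stake: $%.2f'
      % (strategy_multiple, stock['fees'].sum(), stake,
         stake * (strategy_multiple - 1.) - stock['fees'].sum()))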
stock[['returns', 'strategy']].cumsum().apply(np.exp).plot()
plt.title("S&P Returns vs Market Moving Averages 42/252 Fees: %.2lf " %(stock['fees'].sum()))
plt.show()
| mit |
timcera/mettoolbox | mettoolbox/utils.py | 1 | 4034 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from solarpy import declination
from tstoolbox import tsutils
def _check_cols(*args):
# (2, "tmin")
coll_cols = []
coll_names = []
for col, name in args:
if col is None:
continue
coll_cols.append(col)
coll_names.append(name)
return coll_cols, coll_names
def _check_temperature_cols(
temp_min_col=None,
temp_max_col=None,
temp_mean_col=None,
temp_min_required=False,
temp_max_required=False,
):
"""Check temperature columns to make sure necessary ones are filled in."""
if temp_min_col is None and temp_min_required is True:
raise ValueError(
tsutils.error_wrapper(
"""
This evaporation method requires the minimum daily temperature column to be specified with "temp_min_col".""".format(
                **locals()
)
)
)
if temp_max_col is None and temp_max_required is True:
raise ValueError(
tsutils.error_wrapper(
"""
This evaporation method requires the maximum daily temperature column to be specified with "temp_max_col".""".format(
                **locals()
)
)
)
    if temp_mean_col is None and (temp_min_col is None or temp_max_col is None):
raise ValueError(
tsutils.error_wrapper(
"""
If you do not pass a mean daily temperature column in "temp_mean_col"
you must give both minimum and maximum daily temperatures using
"temp_min_col" and "temp_max_col".
You gave {temp_min_col} for "temp_min_col" and
{temp_max_col} for "temp_max_col". """.format(
                **locals()
)
)
)
return _check_cols(
(temp_min_col, "tmin"), (temp_max_col, "tmax"), (temp_mean_col, "tmean")
)
def _validate_temperatures(tsd, temp_min_col, temp_max_col):
if "tmean" not in tsd.columns:
if (tsd.tmax < tsd.tmin).any():
raise ValueError(
tsutils.error_wrapper(
"""
On the following dates:
{0},
minimum temperature values in column "{1}" are greater than the
maximum temperature values in column "{2}".""".format(
tsd[tsd.tmax < tsd.tmin].index, temp_min_col, temp_max_col
)
)
)
warnings.warn(
tsutils.error_wrapper(
""" Since `temp_mean_col` is None, the average daily temperature will be
estimated by the average of `temp_min_col` and `temp_max_col`""".format(
**locals()
)
)
)
tsd["tmean"] = (tsd.tmin + tsd.tmax) / 2.0
else:
if (tsd.tmin >= tsd.tmean).any() or (tsd.tmax <= tsd.tmean).any():
raise ValueError(
tsutils.error_wrapper(
""" On the following dates:
{0},
the daily average temperature is less than or equal to the minimum temperature in column {1} or greater than or equal to the maximum temperature in column
{2}.""".format(
                        tsd[(tsd.tmin >= tsd.tmean) | (tsd.tmax <= tsd.tmean)].index,
temp_min_col,
temp_max_col,
)
)
)
return tsd
def radiation(tsd, lat):
jday = tsd.index.dayofyear.astype("i").values
lrad = lat * np.pi / 180.0
dec = [declination(i) for i in tsd.index.to_pydatetime()]
s = np.arccos(-np.tan(dec) * np.tan(lrad))
# FAO radiation calculation
dr = 1.0 + 0.033 * np.cos(2 * np.pi * jday / 365)
# FAO radiation calculation
ra = (
118.08
/ np.pi
* dr
* (s * np.sin(lrad) * np.sin(dec) + np.cos(lrad) * np.cos(dec) * np.sin(s))
)
newra = pd.DataFrame(ra, index=tsd.index, columns=["ra"])
return newra
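if __name__ == "__main__":
    # Added sketch: illustrative smoke test of radiation(); the dates and latitude
    # below are assumptions for demonstration, not values shipped with mettoolbox.
    _idx = pd.date_range("2000-06-01", periods=3, freq="D")
    print(radiation(pd.DataFrame(index=_idx), lat=40.0))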
| bsd-3-clause |
datapythonista/pandas | pandas/io/json/_table_schema.py | 1 | 10103 | """
Table Schema builders
https://specs.frictionlessdata.io/json-table-schema/
"""
from typing import (
TYPE_CHECKING,
Any,
Dict,
Optional,
cast,
)
import warnings
import pandas._libs.json as json
from pandas._typing import (
DtypeObj,
FrameOrSeries,
JSONSerializable,
)
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import DataFrame
import pandas.core.common as com
if TYPE_CHECKING:
from pandas.core.indexes.multi import MultiIndex
loads = json.loads
def as_json_table_type(x: DtypeObj) -> str:
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : np.dtype or ExtensionDtype
Returns
-------
str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
    ===============  =================
    Pandas type      Table Schema type
    ===============  =================
    int64            integer
    float64          number
    bool             boolean
    datetime64[ns]   datetime
    timedelta64[ns]  duration
    object           str
    categorical      any
    ===============  =================
"""
if is_integer_dtype(x):
return "integer"
elif is_bool_dtype(x):
return "boolean"
elif is_numeric_dtype(x):
return "number"
elif is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or is_period_dtype(x):
return "datetime"
elif is_timedelta64_dtype(x):
return "duration"
elif is_categorical_dtype(x):
return "any"
elif is_string_dtype(x):
return "string"
else:
return "any"
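# Added illustration (not part of pandas): typical mappings produced by
# as_json_table_type, mirroring the table in the docstring above.
#
#   as_json_table_type(np.dtype("int64"))          -> "integer"
#   as_json_table_type(np.dtype("datetime64[ns]")) -> "datetime"
#   as_json_table_type(CategoricalDtype())         -> "any"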
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if com.all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == "index":
warnings.warn("Index name of 'index' is not round-trippable")
elif len(nms) > 1 and any(x.startswith("level_") for x in nms):
warnings.warn("Index names beginning with 'level_' are not round-trippable")
return data
data = data.copy()
if data.index.nlevels > 1:
names = [
name if name is not None else f"level_{i}"
for i, name in enumerate(data.index.names)
]
data.index.names = names
else:
data.index.name = data.index.name or "index"
return data
def convert_pandas_type_to_json_field(arr):
dtype = arr.dtype
if arr.name is None:
name = "values"
else:
name = arr.name
field: Dict[str, JSONSerializable] = {
"name": name,
"type": as_json_table_type(dtype),
}
if is_categorical_dtype(dtype):
cats = dtype.categories
ordered = dtype.ordered
field["constraints"] = {"enum": list(cats)}
field["ordered"] = ordered
elif is_period_dtype(dtype):
field["freq"] = dtype.freq.freqstr
elif is_datetime64tz_dtype(dtype):
field["tz"] = dtype.tz.zone
return field
def convert_json_field_to_pandas_type(field):
"""
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"})
'int64'
>>> convert_json_field_to_pandas_type(
... {
... "name": "a_categorical",
... "type": "any",
... "constraints": {"enum": ["a", "b", "c"]},
... "ordered": True,
... }
... )
CategoricalDtype(categories=['a', 'b', 'c'], ordered=True)
>>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type(
... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"}
... )
'datetime64[ns, US/Central]'
"""
typ = field["type"]
if typ == "string":
return "object"
elif typ == "integer":
return "int64"
elif typ == "number":
return "float64"
elif typ == "boolean":
return "bool"
elif typ == "duration":
return "timedelta64"
elif typ == "datetime":
if field.get("tz"):
return f"datetime64[ns, {field['tz']}]"
else:
return "datetime64[ns]"
elif typ == "any":
if "constraints" in field and "ordered" in field:
return CategoricalDtype(
categories=field["constraints"]["enum"], ordered=field["ordered"]
)
else:
return "object"
raise ValueError(f"Unsupported or invalid field type: {typ}")
def build_table_schema(
data: FrameOrSeries,
index: bool = True,
primary_key: Optional[bool] = None,
version: bool = True,
) -> Dict[str, JSONSerializable]:
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default True
Column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `Table Schema
<https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for
conversion types.
Timedeltas as converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': \
[{'name': 'idx', 'type': 'integer'}, \
{'name': 'A', 'type': 'integer'}, \
{'name': 'B', 'type': 'string'}, \
{'name': 'C', 'type': 'datetime'}], \
'primaryKey': ['idx'], \
'pandas_version': '0.20.0'}
"""
if index is True:
data = set_default_names(data)
schema: Dict[str, Any] = {}
fields = []
if index:
if data.index.nlevels > 1:
data.index = cast("MultiIndex", data.index)
for level, name in zip(data.index.levels, data.index.names):
new_field = convert_pandas_type_to_json_field(level)
new_field["name"] = name
fields.append(new_field)
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.items():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema["fields"] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema["primaryKey"] = [data.index.name]
else:
schema["primaryKey"] = data.index.names
elif primary_key is not None:
schema["primaryKey"] = primary_key
if version:
schema["pandas_version"] = "0.20.0"
return schema
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : bool
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
col_order = [field["name"] for field in table["schema"]["fields"]]
df = DataFrame(table["data"], columns=col_order)[col_order]
dtypes = {
field["name"]: convert_json_field_to_pandas_type(field)
for field in table["schema"]["fields"]
}
# No ISO constructor for Timedelta as of yet, so need to raise
if "timedelta64" in dtypes.values():
raise NotImplementedError(
'table="orient" can not yet read ISO-formatted Timedelta data'
)
df = df.astype(dtypes)
if "primaryKey" in table["schema"]:
df = df.set_index(table["schema"]["primaryKey"])
if len(df.index.names) == 1:
if df.index.name == "index":
df.index.name = None
else:
df.index.names = [
None if x.startswith("level_") else x for x in df.index.names
]
return df
| bsd-3-clause |
GreenGear5/planet-wars | bots/randregular/randregular.py | 1 | 4316 | #!/usr/bin/env python
"""
A basic adaptive bot. This is part of the second worksheet.
"""
from api import State, util
import random, os
from sklearn.externals import joblib
DEFAULT_MODEL = os.path.dirname(os.path.realpath(__file__)) + '/randregular-model.pkl'
class Bot:
__max_depth = -1
__randomize = True
__model = None
def __init__(self, randomize=True, depth=4, model_file=DEFAULT_MODEL):
print(model_file)
self.__randomize = randomize
self.__max_depth = depth
# Load the model
self.__model = joblib.load(model_file)
def get_move(self, state):
val, move = self.value(state)
return move
def value(self, state, alpha=float('-inf'), beta=float('inf'), depth = 0):
"""
Return the value of this state and the associated move
:param state:
:param alpha: The highest score that the maximizing player can guarantee given current knowledge
:param beta: The lowest score that the minimizing player can guarantee given current knowledge
:param depth: How deep we are in the tree
:return: val, move: the value of the state, and the best move.
"""
if state.finished():
return (1.0, None) if state.winner() == 1 else (-1.0, None)
if depth == self.__max_depth:
return self.heuristic(state), None
best_value = float('-inf') if maximizing(state) else float('inf')
best_move = None
moves = state.moves()
if self.__randomize:
random.shuffle(moves)
for move in moves:
next_state = state.next(move)
value, m = self.value(next_state, alpha, beta, depth + 1)
if maximizing(state):
if value > best_value:
best_value = value
best_move = move
alpha = best_value
else:
if value < best_value:
best_value = value
best_move = move
beta = best_value
# Prune the search tree
# We know this state will never be chosen, so we stop evaluating its children
            if alpha >= beta:
break
return best_value, best_move
def heuristic(self, state):
# Convert the state to a feature vector
feature_vector = [features(state)]
# These are the classes: ('won', 'lost')
classes = list(self.__model.classes_)
# Ask the model for a prediction
# This returns a probability for each class
prob = self.__model.predict_proba(feature_vector)[0]
# print('{} {} {}'.format(classes, prob, util.ratio_ships(state, 1)))
# Weigh the win/loss outcomes (-1 and 1) by their probabilities
res = -1.0 * prob[classes.index('lost')] + 1.0 * prob[classes.index('won')]
# print(res)
return res
def maximizing(state):
"""
Whether we're the maximizing player (1) or the minimizing player (2).
:param state:
:return:
"""
return state.whose_turn() == 1
def features(state):
# type: (State) -> tuple[float, ...]
"""
Extract features from this state. Remember that every feature vector returned should have the same length.
:param state: A state to be converted to a feature vector
:return: A tuple of floats: a feature vector representing this state.
"""
my_id = state.whose_turn()
    opponent_id = 2 if my_id == 1 else 1
# How many ships does p1 have in garrisons?
p1_garrisons = 0.0
# How many ships does p2 have in garrisons?
p2_garrisons = 0.0
p1_planets = 0
p2_planets = 0
for planet in state.planets(my_id):
p1_garrisons += state.garrison(planet)
p1_planets += 1
for planet in state.planets(opponent_id):
p2_garrisons += state.garrison(planet)
p2_planets += 1
# How many ships does p1 have in fleets?
p1_fleets = 0.0
# How many ships does p2 have in fleets?
p2_fleets = 0.0
for fleet in state.fleets():
if fleet.owner() == my_id:
            p1_fleets += fleet.size()
else:
p2_fleets += fleet.size()
return p1_garrisons, p2_garrisons, p1_fleets, p2_fleets, p1_planets, p2_planets
| mit |
jseabold/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
mehdidc/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
lyndsysimon/osf.io | scripts/analytics/utils.py | 30 | 1244 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import requests
from website import util
def oid_to_datetime(oid):
return ObjectId(oid).generation_time
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
pass
def plot_dates(dates, *args, **kwargs):
"""Plot date histogram."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(
[mdates.date2num(each) for each in dates],
*args, **kwargs
)
fig.autofmt_xdate()
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
return fig
def make_csv(fp, rows, headers=None):
writer = csv.writer(fp)
if headers:
writer.writerow(headers)
writer.writerows(rows)
def send_file(app, name, content_type, file_like, node, user):
"""Upload file to OSF."""
file_like.seek(0)
with app.test_request_context():
upload_url = util.waterbutler_url_for('upload', 'osfstorage', name, node, user=user)
requests.put(
upload_url,
data=file_like,
headers={'Content-Type': content_type},
)
| apache-2.0 |
jereze/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
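        # Added note: `hides` wraps the method in a property that raises
        # AttributeError whenever the instance was built with
        # hidden_method=<method name>, so hasattr() reports the method as missing
        # and the metaestimator is expected not to expose it either.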
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
deepchem/deepchem | examples/low_data/toxcast_maml.py | 4 | 3781 | from __future__ import print_function
import deepchem as dc
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
# Load the data.
tasks, datasets, transformers = dc.molnet.load_toxcast()
(train_dataset, valid_dataset, test_dataset) = datasets
x = train_dataset.X
y = train_dataset.y
w = train_dataset.w
n_features = x.shape[1]
n_molecules = y.shape[0]
n_tasks = y.shape[1]
# Toxcast has data on 6874 molecules and 617 tasks. However, the data is very
# sparse: most tasks do not include data for most molecules. It also is very
# unbalanced: there are many more negatives than positives. For each task,
# create a list of alternating positives and negatives so each batch will have
# equal numbers of both.
task_molecules = []
for i in range(n_tasks):
positives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 1]
negatives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 0]
np.random.shuffle(positives)
np.random.shuffle(negatives)
mols = sum((list(m) for m in zip(positives, negatives)), [])
task_molecules.append(mols)
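# Added sketch: the zip/sum idiom above interleaves the two index lists and
# truncates to the shorter one, so each task's molecule list alternates
# positive, negative, positive, ... e.g.:
assert sum((list(m) for m in zip([1, 2, 3], [4, 5])), []) == [1, 4, 2, 5]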
# Define a MetaLearner describing the learning problem.
class ToxcastLearner(dc.metalearning.MetaLearner):
def __init__(self):
self.n_training_tasks = int(n_tasks * 0.8)
self.batch_size = 10
self.batch_start = [0] * n_tasks
self.set_task_index(0)
self.w1 = tf.Variable(
np.random.normal(size=[n_features, 1000], scale=0.02), dtype=tf.float32)
self.w2 = tf.Variable(
np.random.normal(size=[1000, 1], scale=0.02), dtype=tf.float32)
self.b1 = tf.Variable(np.ones(1000), dtype=tf.float32)
self.b2 = tf.Variable(np.zeros(1), dtype=tf.float32)
def compute_model(self, inputs, variables, training):
x, y = [tf.cast(i, tf.float32) for i in inputs]
w1, w2, b1, b2 = variables
dense1 = tf.nn.relu(tf.matmul(x, w1) + b1)
logits = tf.matmul(dense1, w2) + b2
output = tf.sigmoid(logits)
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
return loss, [output]
@property
def variables(self):
return [self.w1, self.w2, self.b1, self.b2]
def set_task_index(self, index):
self.task = index
def select_task(self):
self.set_task_index((self.task + 1) % self.n_training_tasks)
def get_batch(self):
task = self.task
start = self.batch_start[task]
mols = task_molecules[task][start:start + self.batch_size]
labels = np.zeros((self.batch_size, 1))
labels[np.arange(self.batch_size), 0] = y[mols, task]
if start + 2 * self.batch_size > len(task_molecules[task]):
self.batch_start[task] = 0
else:
self.batch_start[task] += self.batch_size
return [x[mols, :], labels]
# Run meta-learning on 80% of the tasks.
n_epochs = 20
learner = ToxcastLearner()
maml = dc.metalearning.MAML(learner)
steps = n_epochs * learner.n_training_tasks // maml.meta_batch_size
maml.fit(steps)
# Validate on the remaining tasks.
def compute_scores(optimize):
maml.restore()
y_true = []
y_pred = []
losses = []
for task in range(learner.n_training_tasks, n_tasks):
learner.set_task_index(task)
if optimize:
maml.train_on_current_task(restore=True)
inputs = learner.get_batch()
loss, prediction = maml.predict_on_batch(inputs)
y_true.append(inputs[1])
y_pred.append(prediction[0][:, 0])
losses.append(loss)
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
print()
print('Cross entropy loss:', np.mean(losses))
print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
print()
print('Before fine tuning:')
compute_scores(False)
print('After fine tuning:')
compute_scores(True)
| mit |
dualphase90/Learning-Neural-Networks | CS 224 D Tensorflow into.py | 1 | 3452 | # import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
a=tf.constant(5)
b=tf.constant(5)
c=a*b
with tf.Session() as sess:
print(sess.run(c))
x=c.eval()
print(c.eval())
print(x)
W1 = tf.ones((2,2))
W2 = tf.Variable(tf.zeros((2,2)), name="weights")
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
print(sess.run(W2))
print(sess.run(W1))
#### Updating variable
state = tf.Variable(0, name="counter")
new_value = tf.add(state, tf.constant(1))
update = tf.assign(state, new_value)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
print(sess.run(state))
for _ in range(3):
sess.run(update)
print(sess.run(state))
###Fetching Variable State (1)
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
intermed = tf.add(input2, input3)
mul = tf.mul(input1, intermed)
with tf.Session() as sess:
result = sess.run([mul, intermed])
print("result is :"+str(result))
###Inputting Data
a = np.zeros((3,3))
ta = tf.convert_to_tensor(a)
with tf.Session() as sess:
print(sess.run(ta))
### Placeholders and Feed Dictionaries (2)
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.mul(input1, input2)
with tf.Session() as sess:
print(sess.run([output], feed_dict={input1:[7.], input2:[2.]}))
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
    # assert v.name == "foo/bar/v:0"
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
tf.get_variable_scope().reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
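# Added note: without the reuse_variables() call above, the second
# tf.get_variable("v", [1]) in scope "foo" would raise a ValueError because the
# variable already exists; enabling reuse switches get_variable from creating
# variables to looking them up.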
### Ex: Linear Regression in TensorFlow (1)
import numpy as np
import seaborn
import matplotlib.pyplot as plt
# Define input data
X_data = np.arange(100, step=.1)
y_data = X_data + 20 * np.sin(X_data/10)
# Plot input data
#plt.scatter(X_data, y_data)
# Define data size and batch size
n_samples = 1000
batch_size = 1000
# Tensorflow is finicky about shapes, so resize
X_data = np.reshape(X_data, (n_samples,1))
y_data = np.reshape(y_data, (n_samples,1))
# Define placeholders for input
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))
# Define variables to be learned
with tf.variable_scope("linear-regression"):
W = tf.get_variable("weights", (1, 1),
initializer=tf.random_normal_initializer())
b = tf.get_variable("bias", (1,),
initializer=tf.constant_initializer(0.0))
y_pred = tf.matmul(X, W) + b
loss = tf.reduce_sum((y - y_pred)**2/n_samples)
opt = tf.train.AdamOptimizer()
opt_operation = opt.minimize(loss)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
sess.run([opt_operation], feed_dict={X: X_data, y: y_data})
# Sample code to run full gradient descent:
# Define optimizer operation
opt_operation = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
# Initialize Variables in graph
sess.run(tf.initialize_all_variables())
# Gradient descent loop for 500 steps
for _ in range(500):
# Select random minibatch
indices = np.random.choice(n_samples, batch_size)
X_batch, y_batch = X_data[indices], y_data[indices]
# Do gradient descent step
_, loss_val = sess.run([opt_operation, loss], feed_dict={X: X_batch, y: y_batch}) | mit |
vermouthmjl/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
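# Added note: with the uncommented entries above the grid has
# 3 (max_df) x 2 (ngram_range) x 2 (alpha) x 2 (penalty) = 24 candidate settings,
# and GridSearchCV refits the pipeline once per setting per CV fold, so enabling
# more parameters multiplies the total fitting cost accordingly.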
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
xianjunzhengbackup/code | data science/machine_learning_for_the_web/chapter_2/pca.py | 1 | 1703 | import numpy as np
from matplotlib import pyplot as plt
#line y = 2*x
x = np.arange(1,101,1).astype(float)
y = 5*np.arange(1,101,1).astype(float)
#add noise
noise = np.random.normal(0, 10, 100)
y += noise
fig = plt.figure(figsize=(10,10))
#plot
plt.plot(x,y,'ro')
plt.axis([0,102, -20,220])
plt.quiver(60, 100,10-0, 20-0, scale_units='xy', scale=1)
plt.arrow(60, 100,10-0, 20-0,head_width=2.5, head_length=2.5, fc='k', ec='k')
plt.text(70, 110, r'$v^1$', fontsize=20)
#plt.show()
#save
ax = fig.add_subplot(111)
ax.axis([0,102, -20,220])
ax.set_xlabel('x',fontsize=40)
ax.set_ylabel('y',fontsize=40)
fig.suptitle('2 dimensional dataset',fontsize=40)
fig.savefig('pca_data.png')
#calc PCA
mean_x = np.mean(x)
mean_y = np.mean(y)
mean_vector = np.array([[mean_x],[mean_y]])
u_x = (x- mean_x)/np.std(x)
u_y = (y-mean_y)/np.std(y)
sigma = np.cov([u_x,u_y])
print sigma
eig_vals, eig_vecs = np.linalg.eig(sigma)
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i])
for i in range(len(eig_vals))]
eig_pairs.sort()
eig_pairs.reverse()
print eig_pairs
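# Added sketch: fraction of total variance carried by each eigenvalue; with data
# generated around a single line the leading component should dominate.
print eig_vals / eig_vals.sum()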
v1 = eig_pairs[0][1]
#leading eigenvector:
x_v1 = v1[0]*np.std(x)+mean_x
y_v1 = v1[1]*np.std(y)+mean_y
print x_v1,'-',y_v1,'slope:',(y_v1)/(x_v1)
from sklearn.decomposition import PCA
#X = np.array([x,y])
X = np.array([u_x,u_y])
X = X.T
#print X
pca = PCA(n_components=1)
pca.fit(X)
V = pca.components_
print V,'-',V[0][1]/V[0][0]
#transform in reduced space
X_red_sklearn = pca.fit_transform(X)
print X_red_sklearn.shape
W = np.array(v1.reshape(2,1))
X_red = W.T.dot(X.T)
#check the reduced matrices are equal
assert np.allclose(np.abs(X_red.T), np.abs(X_red_sklearn)), 'problem with the pca algorithm'
print X_red.T[0],'-',X_red_sklearn[0] | mit |
ClimbsRocks/scikit-learn | sklearn/tests/test_calibration.py | 15 | 11959 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
ucbtrans/sumo-project | examples/timingPlan_simulation/Throughput/plots4doubleInt/tau_plots.py | 1 | 1894 | import sys
import optparse
import subprocess
import random
import pdb
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.rcParams.update({'font.size': 40})
import math
import numpy as np
import scipy.io
a3_100 = np.loadtxt('2min3RCT_taus_100m',dtype=int)
t3_100 = np.loadtxt('2min3RCT_taus_time_100m',dtype=int)
a3_300 = np.loadtxt('2min3RCT_taus_300m',dtype=int)
t3_300 = np.loadtxt('2min3RCT_taus_time_300m',dtype=int)
a3_500 = np.loadtxt('2min3RCT_taus_500m',dtype=int)
t3_500 = np.loadtxt('2min3RCT_taus_time_500m',dtype=int)
a0_100 = np.loadtxt('2min0RCT_taus_100m',dtype=int)
t0_100 = np.loadtxt('2min0RCT_taus_time_100m',dtype=int)
a0_300 = np.loadtxt('2min0RCT_taus_300m',dtype=int)
t0_300 = np.loadtxt('2min0RCT_taus_time_300m',dtype=int)
a0_500 = np.loadtxt('2min0RCT_taus_500m',dtype=int)
t0_500 = np.loadtxt('2min0RCT_taus_time_500m',dtype=int)
ss = [1440]*len(t3_100)
ts = np.subtract(t3_100,120)
# 100m intersection
plt.figure(1)
m1, = plt.plot(np.subtract(t0_100,120),a0_100,label='RCT: 0s',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t3_100,120),a3_100,label='RCT: 3s',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,ms],loc='lower left') #,fontsize=25)
plt.xlabel('Time (s)')
plt.ylabel('Throughput (veh/hr)')
plt.figure(2)
m1, = plt.plot(np.subtract(t0_500,120),a0_500,label='RCT: 0s',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t3_500,120),a3_500,label='RCT: 3s',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,ms],loc='lower left') #,fontsize=25)
plt.xlabel('Time (s)')
plt.ylabel('Throughput (veh/hr)')
plt.show()
| bsd-2-clause |
cerrno/neurokernel | examples/olfaction/visualize_output.py | 3 | 2498 | #!/usr/bin/env python
"""
Visualize olfactory model output.
"""
import re
import numpy as np
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import h5py
import networkx as nx
import neurokernel.tools.graph
g = nx.read_gexf('./data/antennallobe.gexf.gz')
df_node, df_edge = neurokernel.tools.graph.graph_to_df(g)
glom_name = 'DA1'
osn_ind = sorted(list(set([ind[0] for ind in \
df_edge[df_edge.name.str.contains('.*-%s_.*' % glom_name)].index])))
pn_ind = sorted(list(set([ind[1] for ind in \
df_edge[df_edge.name.str.contains('.*-%s_.*' % glom_name)].index])))
# Get OSN and PN label indices:
osn_ind_labels = [int(re.search('osn_.*_(\d+)', name).group(1)) \
for name in df_node.ix[osn_ind].name]
pn_ind_labels = [int(re.search('.*_pn_(\d+)', name).group(1)) \
for name in df_node.ix[pn_ind].name]
fmt = lambda x, pos: '%2.2f' % (float(x)/1e4)
with h5py.File('./data/olfactory_input.h5', 'r') as fi, \
h5py.File('olfactory_output_spike.h5', 'r') as fo:
data_i = fi['array'].value
data_o = fo['array'].value
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['figure.figsize'] = (12,9)
raster = lambda data: plt.eventplot([np.nonzero(data[i, :])[0] for i in xrange(data.shape[0])],
colors = [(0, 0, 0)],
lineoffsets = np.arange(data.shape[0]),
linelengths = np.ones(data.shape[0])/2.0)
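# Added note: eventplot draws one row of tick marks per neuron, with spike times
# taken from the nonzero columns of each row of the (neurons x timesteps) array
# passed to `raster`.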
f = plt.figure()
plt.subplot(311)
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
plt.plot(data_i[:10000,0])
ax.set_ylim(np.min(data_i)-1, np.max(data_i)+1)
ax.set_xlim(0, 10000)
plt.title('Input Stimulus'); plt.ylabel('Concentration')
plt.subplot(312)
raster(data_o.T[osn_ind, :])
plt.title('Spikes Generated by OSNs'); plt.ylabel('OSN #');
ax = plt.gca()
ax.set_ylim(np.min(osn_ind_labels), np.max(osn_ind_labels))
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=5.0))
plt.subplot(313)
raster(data_o.T[pn_ind, :])
plt.title('Spikes Generated by PNs'); plt.ylabel('PN #');
ax = plt.gca()
ax.set_ylim(np.min(pn_ind_labels)-0.5, np.max(pn_ind_labels)+0.5)
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=1.0))
plt.xlabel('time (s)')
plt.subplots_adjust()
f.savefig('olfactory_output.png')
| bsd-3-clause |
manterd/myPhyloDB | functions/analysis/rf_graphs.py | 1 | 38106 | import datetime
from django.http import HttpResponse
import logging
from natsort import natsorted
import pandas as pd
from PyPDF2 import PdfFileReader, PdfFileMerger
from pyper import *
import json
import functions
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
def getRF(request, stops, RID, PID):
try:
if request.is_ajax():
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 5: Reading normalized data file...') # No?
functions.setBase(RID, 'Step 2 of 5 Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
result = ''
treeType = int(all['treeType'])
if treeType == 1:
if selectAll == 1:
result += 'Taxa level: Kingdom' + '\n'
elif selectAll == 2:
result += 'Taxa level: Phyla' + '\n'
elif selectAll == 3:
result += 'Taxa level: Class' + '\n'
elif selectAll == 4:
result += 'Taxa level: Order' + '\n'
elif selectAll == 5:
result += 'Taxa level: Family' + '\n'
elif selectAll == 6:
result += 'Taxa level: Genus' + '\n'
elif selectAll == 7:
result += 'Taxa level: Species' + '\n'
elif selectAll == 9:
result += 'Taxa level: OTU_99' + '\n'
elif treeType == 2:
if keggAll == 1:
result += 'KEGG Pathway level: 1' + '\n'
elif keggAll == 2:
result += 'KEGG Pathway level: 2' + '\n'
elif keggAll == 3:
result += 'KEGG Pathway level: 3' + '\n'
elif treeType == 3:
if nzAll == 1:
result += 'KEGG Enzyme level: 1' + '\n'
elif nzAll == 2:
result += 'KEGG Enzyme level: 2' + '\n'
elif nzAll == 3:
result += 'KEGG Enzyme level: 3' + '\n'
elif nzAll == 4:
result += 'KEGG Enzyme level: 4' + '\n'
                elif nzAll == 5:
                    result += 'KEGG Enzyme level: GIBBs' + '\n'
                elif nzAll == 6:
                    result += 'KEGG Enzyme level: Nitrogen cycle' + '\n'
# Select samples and meta-variables from savedDF
metaValsCat = all['metaValsCat']
metaIDsCat = all['metaIDsCat']
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar)
allFields = catFields + quantFields
if not catFields and not quantFields:
error = "Selected categorical variable(s) contain only one level.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 2 of 5: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 5: Selecting your chosen taxa or KEGG level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
mapTaxa = 'no'
finalDF = pd.DataFrame()
if treeType == 1:
if selectAll != 8:
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remZeroes, perZeroes, filterData, filterPer, filterMeth)
else:
filteredDF = savedDF.copy()
finalDF, missingList = functions.getTaxaDF(selectAll, '', filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
finalDF, allDF = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
finalDF, allDF = functions.getNZDF(nzAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if finalDF.empty:
error = "Selected taxa were not found in your selected samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
# make sure column types are correct
if catFields:
finalDF[catFields] = finalDF[catFields].astype(str)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/rf/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
count_rDF = pd.DataFrame()
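                # DepVar selects the response pivoted into the sample x taxon matrix: 0=abund, 1=rel_abund, 2=rich, 3=diversity, 4=abund_16S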
if DepVar == 0:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund')
elif DepVar == 1:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rel_abund')
elif DepVar == 2:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rich')
elif DepVar == 3:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='diversity')
elif DepVar == 4:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund_16S')
count_rDF.fillna(0, inplace=True)
functions.setBase(RID, 'Step 3 of 5: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...')
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from cran
r("list.of.packages <- c('caret', 'randomForest', 'NeuralNetTools', 'e1071', 'stargazer', 'stringr', 'ROCR')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...')
r('library(caret)')
r('library(reshape2)')
r('library(RColorBrewer)')
r('library(ROCR)')
r("library(plyr)")
r('library(stargazer)')
r('library(stringr)')
r('source("R/myFunctions/myFunctions.R")')
method = all['Method']
if method == 'rf':
r('library(randomForest)')
elif method == 'nnet':
r('library(NeuralNetTools)')
elif method == 'svm':
r('library(e1071)')
# Wrangle data into R
rankNameDF = finalDF.drop_duplicates(subset='rank_id', keep='last')
rankNameDF.sort_values('rank_id', inplace=True)
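                # build display names for R: for enzyme trees with nzAll >= 5 keep the text before ': ', otherwise join rank_name and rank_id with ' id: ' so they can be split apart again in R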
if treeType == 3 and nzAll >= 5:
rankNameDF.loc[:, 'name_id'] = rankNameDF['rank_name'].str.split(': ').str[0]
else:
rankNameDF.loc[:, 'name_id'] = rankNameDF[['rank_name', 'rank_id']].apply(lambda x: ' id: '.join(x), axis=1)
r.assign('rankNames', rankNameDF.name_id.values)
count_rDF.sort_index(axis=0, inplace=True)
r.assign("treeType", treeType)
r.assign("data", count_rDF)
r("names(data) <- rankNames")
myList = list(metaDF.select_dtypes(include=['object']).columns)
for i in myList:
metaDF[i] = metaDF[i].str.replace(' ', '_')
metaDF[i] = metaDF[i].str.replace('-', '.')
metaDF[i] = metaDF[i].str.replace('(', '.')
metaDF[i] = metaDF[i].str.replace(')', '.')
metaDF.sort_values('sampleid', inplace=True)
metaDF.set_index('sampleid', inplace=True)
r.assign("meta_full", metaDF)
r.assign("rows", metaDF.index.values.tolist())
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# Predictors
r("X_full = data")
r("nzv_cols <- nearZeroVar(X_full)")
r("if(length(nzv_cols > 0)) X_full <- X_full[,-nzv_cols]")
r("n.vars <- ncol(data)")
# Response
r.assign("allFields", allFields)
r("Y_full = meta_full")
# Subset train data
trainIDs = all['trainArray']
r.assign("trainIDs", trainIDs)
r("X <- X_full[row.names(X_full) %in% trainIDs,]")
r("meta <- meta_full[row.names(meta_full) %in% trainIDs,]")
r("Y <- Y_full[row.names(Y_full) %in% trainIDs,]")
r("Y <- Y[,paste(allFields)]")
r("myData <- data.frame(Y, X)")
# Subset test data
testIDs = list(all['testArray'])
if testIDs:
r.assign("testIDs", testIDs)
r("X_test <- X_full[row.names(X_full) %in% testIDs,]")
r("meta_test <- meta_full[row.names(meta_full) %in% testIDs,]")
r("Y_test <- Y_full[row.names(Y_full) %in% testIDs,]")
r("Y_test <- Y_test[,paste(allFields)]")
r("myData_test <- data.frame(Y_test, X_test)")
r("nameVec <- c('Y_test', names(X_test))")
r("nameVec <- make.names(nameVec)")
r("names(myData_test) <- nameVec")
# Initialize R output to pdf
path = 'myPhyloDB/media/temp/rf/Rplots/%s' % RID
if not os.path.exists(path):
os.makedirs(path)
r.assign("path", path)
r.assign("RID", RID)
r("pdf_counter <- 1")
finalDict = {}
# set up tuneGrid
if method == 'rf':
r("method <- 'rf' ")
r("title <- 'Random Forest' ")
r("grid <- expand.grid(.mtry=seq(1, nrow(myData), by=ceiling(nrow(myData)/50) ))")
elif method == 'nnet':
r("method <- 'nnet' ")
r("grid <- expand.grid(.size=seq(1:5), .decay=seq(0, 2, 0.5))")
r("title <- 'Neural Network' ")
elif method == 'svm':
r("method <- 'svmLinear2' ")
r("grid <- expand.grid(.cost=seq(1:10))")
r("title <- 'Support Vector Machine' ")
trainMethod = all['trainMethod']
r.assign("trainMethod", trainMethod)
number1 = int(all['number1'])
r.assign("number1", number1)
number2 = int(all['number2'])
r.assign("number2", number2)
repeats = int(all['repeats'])
r.assign("repeats", repeats)
proportion = float(all['proportion'])
r.assign("proportion", proportion)
if trainMethod == 'boot':
if catFields:
r("ctrl <- trainControl(method='boot', number=number2, \
classProbs=T, savePredictions=T)")
if quantFields:
r("ctrl <- trainControl(method='boot', number=number2, \
classProbs=F, savePredictions=T)")
elif trainMethod == 'cv':
if catFields:
r("ctrl <- trainControl(method='cv', number=number1, \
classProbs=T, savePredictions=T)")
if quantFields:
r("ctrl <- trainControl(method='cv', number=number1, \
classProbs=F, savePredictions=T)")
elif trainMethod == 'repeatedcv':
if catFields:
r("ctrl <- trainControl(method='repeatedcv', number=number1, \
repeats=repeats, classProbs=T, savePredictions=T)")
if quantFields:
r("ctrl <- trainControl(method='repeatedcv', number=number1, \
repeats=repeats, classProbs=F, savePredictions=T)")
elif trainMethod == 'LOOCV':
if catFields:
r("ctrl <- trainControl(method='LOOCV', number=number1, \
classProbs=T, savePredictions=T)")
if quantFields:
r("ctrl <- trainControl(method='LOOCV', number=number1, \
classProbs=F, savePredictions=T)")
elif trainMethod == 'LGOCV':
if catFields:
r("ctrl <- trainControl(method='LGOCV', number=number1, \
p=proportion, classProbs=T, savePredictions=T)")
if quantFields:
r("ctrl <- trainControl(method='LGOCV', number=number1, \
p=proportion, classProbs=F, savePredictions=T)")
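                # fit the model with caret::train; linout=F for classification, linout=T for regression-type responses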
if catFields:
r("fit <- train(Y ~ ., data=myData, method=method, linout=F, trace=F, trControl=ctrl, \
tuneGrid=grid, importance=T, preProcess=c('center', 'scale'))")
if quantFields:
r("fit <- train(Y ~ ., data=myData, method=method, linout=T, trace=F, trControl=ctrl, \
tuneGrid=grid, importance=T, preProcess=c('center', 'scale'))")
r("predY <- predict(fit)")
r("if (exists('predY')) {fitError <- FALSE} else {fitError <- TRUE}")
fitError = r.get("fitError")
if fitError:
myDict = {'error': "Model could not be fit:\nPlease try a different model"}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
else:
result += str(r('print(fit)')) + '\n'
result += '===============================================\n'
if catFields:
r("vi <- varImp(fit, scale=F)")
r("varDF = as.data.frame(vi[[1]])")
r("goodNameVec <- names(X)")
r("badNameVec <- names(myData)[2:length(names(myData))]")
r("row.names(varDF) <- mapvalues(row.names(varDF), from=badNameVec, to=goodNameVec)")
r("rankDF <- apply(-abs(varDF), 2, rank, ties.method='random')")
r("rankDF <- (rankDF <= 6)")
r("rankDF <- rankDF * 1")
r("myFilter <- as.vector(rowSums(rankDF) > 0)")
r("fVarDF <- varDF[myFilter,]")
r("fVarDF['rank_id'] <- row.names(fVarDF)")
r("graphDF <- melt(fVarDF, id='rank_id')")
r("graphDF$rank_id <- gsub(' ', '.', graphDF$rank_id)")
r("graphDF$rank_id <- gsub(':', '.', graphDF$rank_id)")
r("myVec <- unlist(str_split_fixed(as.character(graphDF$rank_id), '\\.id\\.\\.', 2))")
r("graphDF$taxa <- myVec[,1]")
r("graphDF$id <- myVec[,2]")
r("graphDF <- graphDF[with(graphDF, order(taxa, id)),] ")
r("pdf_counter <- pdf_counter + 1")
r("p <- ggplot(graphDF, aes(x=variable, y=value, fill=rank_id))")
r("parse_labels <- function(value) { \
myVec <- unlist(str_split_fixed(value, '\\.id\\.\\.', 2)); \
myVec[,1]; \
}")
r("p <- p + facet_wrap(~rank_id, nc=4, labeller=as_labeller(parse_labels))")
r("p <- p + geom_bar(stat='identity', alpha=0.9, colour='black', size=0.1)")
r("p <- p + theme(axis.ticks=element_line(size = 0.2))")
r("p <- p + theme(strip.text.x=element_text(size=7, colour='blue', angle=0))")
r("p <- p + theme(legend.position='none')")
r("p <- p + theme(axis.title.y=element_text(size=10))")
r("p <- p + theme(axis.text.x=element_text(size=7, angle=90, hjust=1, vjust=0.5))")
r("p <- p + theme(axis.text.y=element_text(size=6))")
r("p <- p + theme(plot.title=element_text(size=12))")
r("p <- p + theme(plot.subtitle=element_text(size=9))")
r("p <- p + labs(y='Importance', x='', \
title=title, \
subtitle='Importance (top 6 for each factor)')")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("char.width <- max(nchar(as.character(graphDF$rank_id)))/35")
r("bar.width <- 0.2+(nlevels(as.factor(graphDF$rank_id))*0.05)")
r("panel.width <- max(char.width, bar.width)")
r("n_wrap <- ceiling(nlevels(as.factor(graphDF$rank_id))/4)")
r("p <- set_panel_size(p, height=unit(1.25, 'in'), width=unit(panel.width, 'in'))")
r("char.width <- max(nchar(as.character(graphDF$variable)))/35")
r("ggsave(filename=file, plot=p, units='in', height=2+(1.5*n_wrap)+char.width, width=2+(4*panel.width))")
                    # graph probabilities for each training sample
r("probY <- predict(fit, type='prob')")
r("myFactors <- levels(Y)")
r("nFactors <- nlevels(Y)")
r("tempDF <- cbind(meta, probY)")
r("tempDF['sampleid'] = row.names(meta)")
r("graphDF <- melt(tempDF, id.vars=c('sampleid', 'sample_name', allFields), measure.vars=myFactors)")
r("names(graphDF) <- c('sampleid', 'sample_name', 'obs', 'variable', 'value')")
r("pdf_counter <- pdf_counter + 1")
r("p <- ggplot(graphDF, aes(x=sampleid, y=value, fill=variable))")
r("p <- p + geom_bar(stat='identity', alpha=0.9, colour='black', size=0.1)")
r("p <- p + facet_wrap(~ obs, scales='free_x', nc=3)")
r("p <- p + scale_x_discrete(labels=element_blank())")
r("p <- p + theme(strip.text.x=element_text(size=7, colour='blue', angle=0))")
r("p <- p + theme(axis.ticks.x=element_blank())")
r("p <- p + theme(axis.ticks.y=element_line(size = 0.2))")
r("p <- p + theme(legend.title=element_blank())")
r("p <- p + theme(legend.text=element_text(size=6))")
r("p <- p + theme(axis.title.y=element_text(size=10))")
r("p <- p + theme(axis.text.y=element_text(size=6))")
r("p <- p + theme(plot.title=element_text(size=12))")
r("p <- p + theme(plot.subtitle=element_text(size=9))")
r("p <- p + labs(y='Probability', x='', \
title=title, \
subtitle='Training Dataset: probabilities')")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("p <- set_panel_size(p, height=unit(1, 'in'), width=unit(1.5, 'in'))")
r("n_wrap <- ceiling(nlevels(graphDF$obs)/3)")
r("ggsave(filename=file, plot=p, units='in', height=2+(1.25*n_wrap), width=2+6)")
# graph probabilities for each test sample
if testIDs:
r("probY_test <- predict(fit, myData_test, type='prob')")
r("myFactors <- levels(Y_test)")
r("nFactors <- nlevels(Y_test)")
r("tempDF <- cbind(meta_test, probY_test)")
r("tempDF['sampleid'] = row.names(meta_test)")
r("graphDF <- melt(tempDF, id.vars=c('sampleid', 'sample_name', allFields), measure.vars=myFactors)")
r("names(graphDF) <- c('sampleid', 'sample_name', 'obs', 'variable', 'value')")
r("pdf_counter <- pdf_counter + 1")
r("p <- ggplot(graphDF, aes(x=sampleid, y=value, fill=variable))")
r("p <- p + geom_bar(stat='identity', alpha=0.9, colour='black', size=0.1)")
r("p <- p + facet_wrap(~ obs, scales='free_x', nc=3)")
r("p <- p + scale_x_discrete(labels=element_blank())")
r("p <- p + theme(strip.text.x=element_text(size=7, colour='blue', angle=0))")
r("p <- p + theme(axis.ticks.x=element_blank())")
r("p <- p + theme(axis.ticks.y=element_line(size = 0.2))")
r("p <- p + theme(legend.title=element_blank())")
r("p <- p + theme(legend.text=element_text(size=6))")
r("p <- p + theme(axis.title.y=element_text(size=10))")
r("p <- p + theme(axis.text.y=element_text(size=6))")
r("p <- p + theme(plot.title=element_text(size=12))")
r("p <- p + theme(plot.subtitle=element_text(size=9))")
r("p <- p + labs(y='Probability', x='', \
title=title, \
subtitle='Test dataset: assignment probabilities')")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("p <- set_panel_size(p, height=unit(1, 'in'), width=unit(1.5, 'in'))")
r("n_wrap <- ceiling(nlevels(obs)/3)")
r("ggsave(filename=file, plot=p, units='in', height=2+(1.25*n_wrap), width=2+6)")
# graph probabilities by taxa
r("probY <- predict(fit, type='prob')")
r("newX <- X[,myFilter]")
r("tempDF <- cbind(newX, Y, probY)")
r("tempDF['sampleid'] <- row.names(tempDF)")
r("myFactors <- levels(Y)")
r("myTaxa <- names(newX)")
r("df1 <- melt(tempDF, id.vars=c('sampleid', 'Y', myTaxa), measure.vars=myFactors)")
r("names(df1) <- c('sampleid', 'Y', myTaxa, 'variable', 'prob')")
r("dfeq <- df1[df1$Y==df1$variable,]")
r("graphDF <- melt(dfeq, id.vars=c('variable', 'prob'), measure.vars=myTaxa)")
r("names(graphDF) <- c('trt', 'prob', 'rank_id', 'count')")
r("graphDF$rank_id <- gsub(' ', '.', graphDF$rank_id)")
r("graphDF$rank_id <- gsub(':', '.', graphDF$rank_id)")
r("myVec <- unlist(str_split_fixed(as.character(graphDF$rank_id), '\\.id\\.\\.', 2))")
r("graphDF$taxa <- myVec[,1]")
r("graphDF$id <- myVec[,2]")
r("graphDF <- graphDF[with(graphDF, order(taxa, id)),] ")
r("pdf_counter <- pdf_counter + 1")
r("par(mar=c(2,2,1,1),family='serif')")
r("p <- ggplot(graphDF, aes(x=count, y=prob, colour=trt))")
r("parse_labels <- function(value) { \
myVec <- unlist(str_split_fixed(value, '\\.id\\.\\.', 2)); \
myVec[,1]; \
}")
r("p <- p + facet_wrap(~rank_id, nc=4, labeller=as_labeller(parse_labels))")
r("p <- p + geom_point(size=0.5)")
r("p <- p + scale_x_log10()")
r("p <- p + theme(strip.text.x=element_text(size=7, colour='blue', angle=0))")
r("p <- p + theme(legend.title=element_blank())")
r("p <- p + theme(legend.text=element_text(size=6))")
r("p <- p + theme(axis.title=element_text(size=10))")
r("p <- p + theme(axis.text.x=element_text(size=7, angle=0))")
r("p <- p + theme(axis.text.y=element_text(size=6))")
r("p <- p + theme(plot.title=element_text(size=12))")
r("p <- p + theme(plot.subtitle=element_text(size=9))")
r("p <- p + labs(y='Probability', x='Abundance', \
title=title, \
subtitle='Probability of correct sample assignment vs taxa abundance')")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("char.width <- max(nchar(as.character(graphDF$rank_id)))/35")
r("bar.width <- 1.5")
r("panel.width <- max(char.width, bar.width)")
r("n_wrap <- ceiling(nlevels(as.factor(graphDF$rank_id))/4)")
r("p <- set_panel_size(p, height=unit(1.25, 'in'), width=unit(panel.width, 'in'))")
r("char.width <- max(nchar(as.character(graphDF$count)))/35")
r("ggsave(filename=file, plot=p, units='in', height=2+(1.5*n_wrap)+char.width, width=2+(4*panel.width))")
# confusion matrix - train
r("tab <- table(Observed=Y, Predicted=predY)")
r("cm <- confusionMatrix(tab)")
result += '\nTraining Dataset\n'
result += str(r('print(cm)')) + '\n'
result += '===============================================\n'
# confusion matrix - test
if testIDs:
r("predY_test <- predict(fit, myData_test)")
r("tab <- table(Observed=Y_test, Predicted=predY_test)")
r("cm <- confusionMatrix(tab)")
result += '\nTest Dataset\n'
result += str(r('print(cm)')) + '\n'
result += '===============================================\n'
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if quantFields:
r("vi <- varImp(fit, scale=F)")
r("varDF = as.data.frame(vi[[1]])")
r("goodNameVec <- names(X)")
r("badNameVec <- names(myData)[2:length(names(myData))]")
r("row.names(varDF) <- mapvalues(row.names(varDF), from=badNameVec, to=goodNameVec)")
r("rankDF <- apply(-varDF, 2, rank, ties.method='random')")
r("rankDF <- (rankDF <= 10)")
r("rankDF <- rankDF * 1")
r("myFilter <- as.vector((rowSums(rankDF) > 0))")
r("Overall <- varDF[myFilter,]")
r("rank_id <- row.names(varDF)[myFilter]")
r("graphDF <- data.frame(rank_id, Overall)")
r("myVec <- unlist(str_split_fixed(as.character(graphDF$rank_id), '\\.id\\.\\.', 2))")
r("graphDF$taxa <- myVec[,1]")
r("graphDF$id <- myVec[,2]")
r("unique <- make.unique(as.vector(graphDF$taxa))")
r("graphDF['taxa2'] <- unique")
r("pdf_counter <- pdf_counter + 1")
r("par(mar=c(2,2,1,1),family='serif')")
r("p <- ggplot(graphDF, aes(x=taxa2, y=Overall))")
r("p <- p + geom_bar(stat='identity', alpha=0.9, , fill='blue', colour='black', size=0.1)")
r("p <- p + theme(axis.ticks=element_line(size = 0.2))")
r("p <- p + theme(strip.text.y=element_text(size=7, colour='blue', angle=0))")
r("p <- p + theme(legend.position='none')")
r("p <- p + theme(axis.title.y=element_text(size=10))")
r("p <- p + theme(axis.text.x = element_text(size=7, angle = 90))")
r("p <- p + theme(axis.text.y = element_text(size=6))")
r("p <- p + theme(plot.title = element_text(size=12))")
r("p <- p + theme(plot.subtitle = element_text(size=9))")
r("p <- p + labs(y='Importance', x='', \
title=title, \
subtitle='Overall importance (top 10)')")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("panel.width <- nlevels(as.factor(graphDF$rank_id))*0.2")
r("p <- set_panel_size(p, height=unit(2, 'in'), width=unit(panel.width, 'in'))")
r("charWidth <- max(nchar(graphDF$taxa2))/12")
r("ggsave(filename=file, plot=p, units='in', height=1+(2)+charWidth, width=1+panel.width)")
r("pdf_counter <- pdf_counter + 1")
r("graphDF <- data.frame(x=Y, y=predY)")
r("p <- ggplot(graphDF, aes(x=x, y=y, color='blue'))")
r("p <- p + geom_abline(yintercept=0, slope=1, color='gray')")
r("p <- p + geom_point()")
r("p <- p + geom_smooth(method='lm', colour='black', se=F)")
r("p <- p + xlim(range(Y, predY))")
r("p <- p + ylim(range(Y, predY))")
r("p <- p + labs(y='Predicted', x='Observed', \
title=title, \
subtitle='Predicted vs Observed')")
# Add test data if available
if testIDs:
r("probY_test <- predict(fit, myData_test, type='prob')")
r("predY_test <- predict(fit, myData_test)")
r("graphDF2 <- data.frame(x=Y_test, y=predY_test)")
r("p <- p + geom_point(data=graphDF2, aes(x=x, y=y, color='red'))")
r("p <- p + geom_smooth(data=graphDF2, method='lm', colour='black', se=F)")
r("p <- p + theme(legend.position='right')")
r("p <- p + scale_color_manual(name='Dataset', values=c('blue', 'red'), labels=c('Train', 'Test'))")
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("ggsave(filename=file, plot=p, units='in', height=4, width=4)")
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# TODO 1.4 Add ROC curve - this does not currently work
'''
# ROC Curve
if testIDs:
r("pdf_counter <- pdf_counter + 1")
print r("probY_test <- predict(fit, myData_test, type='prob')")
print r("predY_test <- prediction(probY_test, myData_test$Y)")
print r("perf <- performance(predY_test, measure='tpr', x.measure='fpr')")
print r('auc <- performance(predY_test, measure = "auc")')
                    print r('auc <- auc@y.values[[1]]')
                    print r('roc.data <- data.frame(fpr=unlist(perf@x.values), \
                                                    tpr=unlist(perf@y.values), \
model="GLM")')
print r('ggplot(roc.data, aes(x=fpr, ymin=0, ymax=tpr)) + \
geom_ribbon(alpha=0.2) + \
geom_line(aes(y=tpr)) + \
ggtitle(paste0("ROC Curve w/ AUC=", auc))')
r("file <- paste(path, '/rf_temp', pdf_counter, '.pdf', sep='')")
r("ggsave(filename=file, plot=p, units='in', height=4, width=4)")
'''
r("myTable <- stargazer(varDF, type='text', summary=F, rownames=T)")
result += 'Variable Importance\n'
myString = r.get("myTable")
result += str(myString)
result += '\n===============================================\n'
if catFields:
r("mergeDF <- cbind(meta, probY)")
r("myTable <- stargazer(mergeDF, type='text', summary=F, rownames=T)")
result += 'Train Dataset: Probabilities\n'
myString = r.get("myTable")
result += str(myString)
result += '\n===============================================\n'
r("mergeDF <- cbind(meta_test, probY_test)")
r("myTable <- stargazer(mergeDF, type='text', summary=F, rownames=T)")
result += 'Test Dataset: Probabilities\n'
myString = r.get("myTable")
result += str(myString)
result += '\n===============================================\n'
if quantFields:
r("mergeDF <- cbind(meta, predY)")
r("myTable <- stargazer(mergeDF, type='text', summary=F, rownames=T)")
result += 'Train Dataset: Observed vs. Predicted\n'
myString = r.get("myTable")
result += str(myString)
result += '\n===============================================\n'
r("mergeDF <- cbind(meta_test, predY_test)")
r("myTable <- stargazer(mergeDF, type='text', summary=F, rownames=T)")
result += 'Test Dataset: Observed vs. Predicted\n'
myString = r.get("myTable")
result += str(myString)
result += '\n===============================================\n'
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# Combining Pdf files
finalFile = 'myPhyloDB/media/temp/rf/Rplots/' + str(RID) + '/rf_final.pdf'
pdf_files = [f for f in os.listdir(path) if f.endswith("pdf")]
pdf_files = natsorted(pdf_files, key=lambda y: y.lower())
merger = PdfFileMerger()
for filename in pdf_files:
merger.append(PdfFileReader(file(os.path.join(path, filename), 'rb')))
merger.write(finalFile)
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 5 of 5: Formatting graph data...')
r("options(width=5000)")
finalDict['text'] = result
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| gpl-3.0 |
johnchase/scikit-bio | skbio/stats/tests/test_gradient.py | 5 | 54893 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from future.builtins import zip
from operator import attrgetter
from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.util.testing as pdt
from skbio.util import get_data_path, assert_data_frame_almost_equal
from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
TrajectoryGradientANOVA,
FirstDifferenceGradientANOVA,
WindowDifferenceGradientANOVA, GroupResults,
CategoryResults, GradientANOVAResults,
_weight_by_vector, _ANOVA_trajectories)
class BaseTests(TestCase):
def setUp(self):
"""Initializes some data for testing"""
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368, 0.03532727349,
-0.254450494129, -0.0687468542543,
0.231895596562, 0.00496549154314,
-0.0026246871695, 9.73837390723e-10]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992, 0.0957182357964,
0.204714844022, -0.0055407341857,
-0.190287966833, 0.16307126638,
9.73837390723e-10]),
'PC.356': np.array([0.220886492631, 0.0874848360559,
-0.351990132198, -0.00316535032886,
0.114635191853, -0.00019194106125,
0.188557853937, 0.030002427212,
9.73837390723e-10]),
'PC.481': np.array([0.0308923744062, -0.0446295973489,
0.133996451689, 0.29318228566, -0.167812539312,
0.130996149793, 0.113551017379, 0.109987942454,
9.73837390723e-10]),
'PC.354': np.array([0.27616778138, -0.0341866951102,
0.0633000238256, 0.100446653327,
0.123802521199, 0.1285839664, -0.132852841046,
-0.217514322505, 9.73837390723e-10]),
'PC.593': np.array([0.202458130052, -0.115216120518,
0.301820871723, -0.18300251046, 0.136208248567,
-0.0989435556722, 0.0927738484879,
0.0909429797672, 9.73837390723e-10]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424, -0.0225473129718,
-0.205287183891, -0.180224615141,
-0.165277751908, 0.0411933458557,
9.73837390723e-10]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617, -0.116066751485,
-0.158763393475, -0.0223918378516,
-0.0263068046112, -0.0501209518091,
9.73837390723e-10]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289, 0.0898852445906,
0.0212491652909, -0.184183028843,
0.114877153051, -0.164938000185,
9.73837390723e-10])
}
self.coords = pd.DataFrame.from_dict(coord_data, orient='index')
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.356': np.array([0.220886492631, 0.0874848360559,
-0.351990132198]),
'PC.481': np.array([0.0308923744062, -0.0446295973489,
0.133996451689]),
'PC.354': np.array([0.27616778138, -0.0341866951102,
0.0633000238256]),
'PC.593': np.array([0.202458130052, -0.115216120518,
0.301820871723]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289])
}
self.coords_3axes = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.354': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '60',
'Description': 'Control_mouse_I.D._354'},
'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.356': {'Treatment': 'Control',
'DOB': '20061126',
'Weight': '50',
'Description': 'Control_mouse_I.D._356'},
'PC.481': {'Treatment': 'Control',
'DOB': '20070314',
'Weight': '52',
'Description': 'Control_mouse_I.D._481'},
'PC.593': {'Treatment': 'Control',
'DOB': '20071210',
'Weight': '57',
'Description': 'Control_mouse_I.D._593'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'},
'PC.635': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '70',
'Description': 'Fasting_mouse_I.D._635'},
'PC.636': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '72',
'Description': 'Fasting_mouse_I.D._636'}}
self.metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
self.prop_expl = np.array([25.6216900347, 15.7715955926,
14.1215046787, 11.6913885817, 9.83044890697,
8.51253468595, 7.88775505332, 6.56308246609,
4.42499350906e-16])
gr_wo_msg = GroupResults('Foo', np.array([-2.6750, -0.2510,
-2.8322, 0.]),
-1.4398, {'mean': -1.4398, 'std': 1.3184},
None)
gr_w_msg = GroupResults('Bar', np.array([9.6823, 2.9511, 5.2434]),
5.9589, {'mean': 5.9589, 'std': 2.7942},
"Cannot calculate the first difference "
"with a window of size (3).")
self.groups = [gr_wo_msg, gr_w_msg]
cr_no_data = CategoryResults('foo', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
cr_data = CategoryResults('bar', 0.0110, self.groups, None)
self.categories = [cr_no_data, cr_data]
vr = GradientANOVAResults('wdiff', True, self.categories)
description = CategoryResults('Description', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
weight = CategoryResults('Weight', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
dob = CategoryResults('DOB', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
control_group = GroupResults('Control', np.array([2.3694, 3.3716,
5.4452, 4.5704,
4.4972]),
4.0508, {'avg': 4.0508}, None)
fast_group = GroupResults('Fast', np.array([7.2220, 4.2726, 1.1169,
4.0271]),
4.1596, {'avg': 4.1596}, None)
treatment = CategoryResults('Treatment', 0.9331,
[control_group, fast_group], None)
vr_real = GradientANOVAResults('avg', False, [description, weight, dob,
treatment])
self.vec_results = [vr, vr_real]
# This function makes the comparisons between the results classes easier
def assert_group_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.name, exp.name)
npt.assert_almost_equal(obs.trajectory, exp.trajectory)
npt.assert_almost_equal(obs.mean, exp.mean)
self.assertEqual(obs.info.keys(), exp.info.keys())
for key in obs.info:
npt.assert_almost_equal(obs.info[key], exp.info[key])
self.assertEqual(obs.message, exp.message)
def assert_category_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.category, exp.category)
if exp.probability is None:
self.assertTrue(obs.probability is None)
self.assertTrue(obs.groups is None)
else:
npt.assert_almost_equal(obs.probability, exp.probability)
for o, e in zip(sorted(obs.groups, key=attrgetter('name')),
sorted(exp.groups, key=attrgetter('name'))):
self.assert_group_results_almost_equal(o, e)
def assert_gradientANOVA_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.algorithm, exp.algorithm)
self.assertEqual(obs.weighted, exp.weighted)
for o, e in zip(sorted(obs.categories, key=attrgetter('category')),
sorted(exp.categories, key=attrgetter('category'))):
self.assert_category_results_almost_equal(o, e)
class GradientTests(BaseTests):
def test_weight_by_vector(self):
"""Correctly weights the vectors"""
trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6]),
's7': np.array([7]),
's8': np.array([8])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 5, 8, 12, 45, 80, 85, 90]),
['s1', 's2', 's3', 's4',
's5', 's6', 's7', 's8']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([6.3571428571]),
's3': np.array([12.7142857142]),
's4': np.array([12.7142857142]),
's5': np.array([1.9264069264]),
's6': np.array([2.1795918367]),
's7': np.array([17.8]),
's8': np.array([20.3428571428])},
orient='index')
obs = _weight_by_vector(trajectory, w_vector)
assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6]),
's7': np.array([7]),
's8': np.array([8])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
['s1', 's2', 's3', 's4',
's5', 's6', 's7', 's8']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1]), 's2': np.array([2]),
's3': np.array([3]), 's4': np.array([4]),
's5': np.array([5]), 's6': np.array([6]),
's7': np.array([7]), 's8': np.array([8])
},
orient='index')
obs = _weight_by_vector(trajectory, w_vector)
assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s2': np.array([2]), 's3': np.array([3]),
's4': np.array([4]), 's5': np.array([5]),
's6': np.array([6])}, orient='index')
obs = _weight_by_vector(trajectory, w_vector)
assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
's2': np.array([2, 3, 4]),
's3': np.array([5, 6, 7]),
's4': np.array([8, 9, 10])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 2, 3, 4]),
['s1', 's2', 's3', 's4']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
's2': np.array([2, 3, 4]),
's3': np.array([5, 6, 7]),
's4': np.array([8, 9, 10])},
orient='index').astype(np.float64)
obs = _weight_by_vector(trajectory, w_vector)
assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
sample_ids = ['PC.356', 'PC.481', 'PC.355', 'PC.593', 'PC.354']
trajectory = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
1.37977545,
-4.9706303]),
'PC.481': np.array([0.79151484,
-0.70387996,
1.89223152]),
'PC.355': np.array([6.05869624,
3.44821245,
-0.42595788]),
'PC.593': np.array([5.18731945,
-1.81714206,
4.26216485]),
'PC.354': np.array([7.07588529,
-0.53917873,
0.89389158])
}, orient='index')
w_vector = pd.Series(np.array([50, 52, 55, 57, 60]),
sample_ids).astype(np.float64)
exp = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
1.37977545,
-4.9706303]),
'PC.481': np.array([0.98939355,
-0.87984995,
2.3652894]),
'PC.355': np.array([5.04891353,
2.87351038,
-0.3549649]),
'PC.593': np.array([6.48414931,
-2.27142757,
5.32770606]),
'PC.354': np.array([5.89657108,
-0.44931561,
0.74490965])
}, orient='index')
obs = _weight_by_vector(trajectory.ix[sample_ids],
w_vector[sample_ids])
assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
def test_weight_by_vector_single_element(self):
trajectory = pd.DataFrame.from_dict({'s1': np.array([42])},
orient='index')
w_vector = pd.Series(np.array([5]), ['s1']).astype(np.float64)
obs = _weight_by_vector(trajectory, w_vector)
assert_data_frame_almost_equal(obs, trajectory)
def test_weight_by_vector_error(self):
"""Raises an error with erroneous inputs"""
# Different vector lengths
with self.assertRaises(ValueError):
_weight_by_vector([1, 2, 3, 4], [1, 2, 3])
# Inputs are not iterables
with self.assertRaises(TypeError):
_weight_by_vector(9, 1)
# Weighting vector is not a gradient
with self.assertRaises(ValueError):
_weight_by_vector([1, 2, 3, 4], [1, 2, 3, 3])
def test_ANOVA_trajectories(self):
"""Correctly performs the check before running ANOVA"""
# Only one group in a given category
group = GroupResults('Bar', np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264, {'avg': 4.0508056626409275}, None)
obs = _ANOVA_trajectories('Foo', [group])
exp = CategoryResults('Foo', None, None,
'Only one value in the group.')
self.assert_category_results_almost_equal(obs, exp)
# One element have only one element
group2 = GroupResults('FooBar', np.array([4.05080566264]),
4.05080566264, {'avg': 4.05080566264}, None)
obs = _ANOVA_trajectories('Foo', [group, group2])
exp = CategoryResults('Foo', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
self.assert_category_results_almost_equal(obs, exp)
gr1 = GroupResults('Foo', np.array([-0.219044992, 0.079674486,
0.09233683]),
-0.015677892, {'avg': -0.015677892}, None)
gr2 = GroupResults('Bar', np.array([-0.042258081, 0.000204041,
0.024837603]),
-0.0732878716, {'avg': -0.0732878716}, None)
gr3 = GroupResults('FBF', np.array([0.080504323, -0.212014503,
-0.088353435]),
-0.0057388123, {'avg': -0.0057388123}, None)
obs = _ANOVA_trajectories('Cat', [gr1, gr2, gr3])
exp = CategoryResults('Cat', 0.8067456876, [gr1, gr2, gr3], None)
self.assert_category_results_almost_equal(obs, exp)
class GroupResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['gr_wo_msg_out', 'gr_w_msg_out']
raw_paths = ['gr_wo_msg_raw', 'gr_w_msg_raw']
for gr, out_fp, raw_fp in zip(self.groups, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
gr.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class CategoryResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['cr_no_data_out', 'cr_data_out']
raw_paths = ['cr_no_data_raw', 'cr_data_raw']
for cat, out_fp, raw_fp in zip(self.categories, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
cat.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class GradientANOVAResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['vr_out']
raw_paths = ['vr_raw']
for vr, out_fp, raw_fp in zip(self.vec_results, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
vr.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class GradientANOVATests(BaseTests):
def test_init(self):
"""Correctly initializes the class attributes"""
# Note self._groups is tested on test_make_groups
# so we are not testing it here
# Test with weighted = False
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
exp_prop_expl = np.array([25.6216900347, 15.7715955926,
14.1215046787])
npt.assert_equal(bv._prop_expl, exp_prop_expl)
assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
self.assertTrue(bv._weighting_vector is None)
self.assertFalse(bv._weighted)
# Test with weighted = True
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='Weight', weighted=True)
assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
npt.assert_equal(bv._prop_expl, exp_prop_expl)
assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
exp_weighting_vector = pd.Series(
np.array([60, 55, 50, 52, 57, 65, 68, 70, 72]),
['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',
'PC.634', 'PC.635', 'PC.636'], name='Weight'
).astype(np.float64)
pdt.assert_series_equal(bv._weighting_vector, exp_weighting_vector)
self.assertTrue(bv._weighted)
def test_init_error(self):
"""Raises an error with erroneous inputs"""
# Raises ValueError if any category in trajectory_categories is not
# present in metadata_map
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['foo'])
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['Weight', 'Treatment', 'foo'])
# Raises ValueError if sort_category is not present in metadata_map
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='foo')
# Raises ValueError if weighted == True and sort_category == None
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
weighted=True)
# Raises ValueError if weighted == True and the values under
# sort_category are not numerical
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='Treatment', weighted=True)
# Raises ValueError if axes > len(prop_expl)
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
axes=10)
# Raises ValueError if axes < 0
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
axes=-1)
def test_normalize_samples(self):
"""Correctly normalizes the samples between coords and metadata_map"""
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289])
}
subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'},
'PC.635': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '70',
'Description': 'Fasting_mouse_I.D._635'},
'PC.636': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '72',
'Description': 'Fasting_mouse_I.D._636'}}
subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
# Takes a subset from metadata_map
bv = GradientANOVA(subset_coords, self.prop_expl, self.metadata_map)
assert_data_frame_almost_equal(
bv._coords.sort(axis=0),
subset_coords.sort(axis=0))
assert_data_frame_almost_equal(
bv._metadata_map.sort(axis=0),
subset_metadata_map.sort(axis=0))
# Takes a subset from coords
bv = GradientANOVA(self.coords, self.prop_expl, subset_metadata_map)
assert_data_frame_almost_equal(
bv._coords.sort(axis=0),
subset_coords.sort(axis=0))
assert_data_frame_almost_equal(
bv._metadata_map.sort(axis=0),
subset_metadata_map.sort(axis=0))
# Takes a subset from metadata_map and coords at the same time
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424])
}
subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'}}
subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
bv = GradientANOVA(subset_coords, self.prop_expl, subset_metadata_map)
exp_coords = pd.DataFrame.from_dict(
{'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424])},
orient='index')
assert_data_frame_almost_equal(
bv._coords.sort(axis=0),
exp_coords.sort(axis=0))
exp_metadata_map = pd.DataFrame.from_dict(
{'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'}},
orient='index')
assert_data_frame_almost_equal(
bv._metadata_map.sort(axis=0),
exp_metadata_map.sort(axis=0))
def test_normalize_samples_error(self):
"""Raises an error if coords and metadata_map does not have samples in
common"""
error_metadata_map = pd.DataFrame.from_dict(
{'Foo': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'Bar': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'}},
orient='index')
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, error_metadata_map)
def test_make_groups(self):
"""Correctly generates the groups for trajectory_categories"""
# Test with all categories
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
'PC.481', 'PC.593'],
'Fast': ['PC.607', 'PC.634',
'PC.635', 'PC.636']},
'DOB': {'20061218': ['PC.354', 'PC.355'],
'20061126': ['PC.356'],
'20070314': ['PC.481'],
'20071210': ['PC.593'],
'20071112': ['PC.607'],
'20080116': ['PC.634', 'PC.635', 'PC.636']},
'Weight': {'60': ['PC.354'],
'55': ['PC.355'],
'50': ['PC.356'],
'52': ['PC.481'],
'57': ['PC.593'],
'65': ['PC.607'],
'68': ['PC.634'],
'70': ['PC.635'],
'72': ['PC.636']},
'Description': {'Control_mouse_I.D._354': ['PC.354'],
'Control_mouse_I.D._355': ['PC.355'],
'Control_mouse_I.D._356': ['PC.356'],
'Control_mouse_I.D._481': ['PC.481'],
'Control_mouse_I.D._593': ['PC.593'],
'Fasting_mouse_I.D._607': ['PC.607'],
'Fasting_mouse_I.D._634': ['PC.634'],
'Fasting_mouse_I.D._635': ['PC.635'],
'Fasting_mouse_I.D._636': ['PC.636']}}
self.assertEqual(bv._groups, exp_groups)
# Test with user-defined categories
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['Treatment', 'DOB'])
exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
'PC.481', 'PC.593'],
'Fast': ['PC.607', 'PC.634',
'PC.635', 'PC.636']},
'DOB': {'20061218': ['PC.354', 'PC.355'],
'20061126': ['PC.356'],
'20070314': ['PC.481'],
'20071210': ['PC.593'],
'20071112': ['PC.607'],
'20080116': ['PC.634', 'PC.635', 'PC.636']}}
self.assertEqual(bv._groups, exp_groups)
def test_make_groups_natural_sorting(self):
# Ensure sample IDs are sorted using a natural sorting algorithm.
df = pd.DataFrame.from_dict({
'a2': {'Col1': 'foo', 'Col2': '1.0'},
'a1': {'Col1': 'bar', 'Col2': '-42.0'},
'a11.0': {'Col1': 'foo', 'Col2': '2e-5'},
'a-10': {'Col1': 'foo', 'Col2': '5'},
'a10': {'Col1': 'bar', 'Col2': '5'}},
orient='index')
coords = pd.DataFrame.from_dict({
'a10': np.array([-0.212230626531, 0.216034194368, 0.03532727349]),
'a11.0': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'a1': np.array([0.220886492631, 0.0874848360559,
-0.351990132198]),
'a2': np.array([0.0308923744062, -0.0446295973489,
0.133996451689]),
'a-10': np.array([0.27616778138, -0.0341866951102,
0.0633000238256])},
orient='index')
prop_expl = np.array([25.6216900347, 15.7715955926, 14.1215046787,
11.6913885817, 9.83044890697])
# Sort by sample IDs.
ga = GradientANOVA(coords, prop_expl, df)
exp_groups = {
'Col1': {
'foo': ['a-10', 'a2', 'a11.0'],
'bar': ['a1', 'a10']
},
'Col2': {
'1.0': ['a2'],
'-42.0': ['a1'],
'2e-5': ['a11.0'],
'5': ['a-10', 'a10']
}
}
self.assertEqual(ga._groups, exp_groups)
# Sort sample IDs by Col2.
ga = GradientANOVA(coords, prop_expl, df,
trajectory_categories=['Col1'],
sort_category='Col2')
exp_groups = {
'Col1': {
'foo': ['a11.0', 'a2', 'a-10'],
'bar': ['a1', 'a10']
}
}
self.assertEqual(ga._groups, exp_groups)
def test_get_trajectories(self):
"""Should raise a NotImplementedError as this is a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv.get_trajectories()
def test_get_group_trajectories(self):
"""Should raise a NotImplementedError in usual execution as this is
a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv.get_trajectories()
def test_get_group_trajectories_error(self):
"""Should raise a RuntimeError if the user call _get_group_trajectories
with erroneous inputs"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(RuntimeError):
bv._get_group_trajectories("foo", ['foo'])
with self.assertRaises(RuntimeError):
bv._get_group_trajectories("bar", [])
def test_compute_trajectories_results(self):
"""Should raise a NotImplementedError as this is a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv._compute_trajectories_results("foo", [])
class AverageGradientANOVATests(BaseTests):
def test_get_trajectories_all(self):
"""get_trajectories returns the results of all categories"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map)
obs = av.get_trajectories()
exp_description = CategoryResults('Description', None, None,
'This group can not be used. All '
'groups should have more than 1 '
'element.')
exp_weight = CategoryResults('Weight', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
exp_dob = CategoryResults('DOB', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
exp_control_group = GroupResults('Control',
np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264,
{'avg': 4.0508056626409275}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
4.2726021564374372,
1.1169097274372082,
4.02717600030876]),
4.15968417703,
{'avg': 4.1596841770278292}, None)
exp_treatment = CategoryResults('Treatment', 0.93311555,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', False, [exp_description, exp_weight,
exp_dob, exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_single(self):
"""get_trajectories returns the results of the provided category"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'])
obs = av.get_trajectories()
exp_control_group = GroupResults('Control',
np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264,
{'avg': 4.0508056626409275}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
4.2726021564374372,
1.1169097274372082,
4.02717600030876]),
4.15968417703,
{'avg': 4.1596841770278292}, None)
exp_treatment = CategoryResults('Treatment', 0.93311555,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
"""get_trajectories returns the correct weighted results"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight', weighted=True)
obs = av.get_trajectories()
exp_control_group = GroupResults('Control', np.array([5.7926887872,
4.3242308936,
2.9212403501,
5.5400792151,
1.2326804315]),
3.9621839355,
{'avg': 3.9621839355}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2187223286,
2.5522161282,
2.2349795861,
4.5278215248]),
4.1334348919,
{'avg': 4.1334348919}, None)
exp_treatment = CategoryResults('Treatment', 0.9057666800,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class TrajectoryGradientANOVATests(BaseTests):
def test_get_trajectories(self):
tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight')
obs = tv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([8.6681963576,
7.0962717982,
7.1036434615,
4.0675712674]),
6.73392072123,
{'2-norm': 13.874494152}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'2-norm': 12.713431181}, None)
exp_treatment = CategoryResults('Treatment', 0.9374500147,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('trajectory', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight', weighted=True)
obs = tv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([8.9850643421,
6.1617529749,
7.7989125908,
4.9666249268]),
6.9780887086,
{'2-norm': 14.2894710091}, None)
exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
2.9511115209,
5.2434091953]),
5.9589630005,
{'2-norm': 11.3995901159}, None)
exp_treatment = CategoryResults('Treatment', 0.6248157720,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('trajectory', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class FirstDifferenceGradientANOVATests(BaseTests):
def test_get_trajectories(self):
dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight')
obs = dv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-1.5719245594,
0.0073716633,
-3.0360721941]),
-1.5335416967,
{'mean': -1.5335416967,
'std': 1.2427771485}, None)
exp_fast_group = GroupResults('Fast', np.array([-7.3127913749,
0.5779766231]),
-3.3674073758,
{'mean': -3.3674073758,
'std': 3.9453839990}, None)
exp_treatment = CategoryResults('Treatment', 0.6015260608,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('diff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight',
weighted=True)
obs = dv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.8233113671,
1.6371596158,
-2.8322876639]),
-1.3394798050,
{'mean': -1.3394798050,
'std': 2.1048051097}, None)
exp_fast_group = GroupResults('Fast', np.array([-6.7312567642,
2.2922976743]),
-2.2194795449,
{'mean': -2.2194795449,
'std': 4.5117772193}, None)
exp_treatment = CategoryResults('Treatment', 0.8348644420,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('diff', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class WindowDifferenceGradientANOVATests(BaseTests):
def test_get_trajectories(self):
wdv = WindowDifferenceGradientANOVA(
self.coords, self.prop_expl, self.metadata_map, 3,
trajectory_categories=['Treatment'], sort_category='Weight')
obs = wdv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.5790341819,
-2.0166764661,
-3.0360721941,
0.]),
-1.9079457105,
{'mean': -1.9079457105,
'std': 1.1592139913}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'mean': 6.5466301150,
'std': 3.3194494926},
"Cannot calculate the first difference "
"with a window of size (3).")
exp_treatment = CategoryResults('Treatment', 0.0103976830,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('wdiff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
wdv = WindowDifferenceGradientANOVA(
self.coords, self.prop_expl, self.metadata_map, 3,
trajectory_categories=['Treatment'], sort_category='Weight',
weighted=True)
obs = wdv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.6759675112,
-0.2510321601,
-2.8322876639,
0.]),
-1.4398218338,
{'mean': -1.4398218338,
'std': 1.31845790844}, None)
exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
2.9511115209,
5.2434091953]),
5.9589630005,
{'mean': 5.9589630005,
'std': 2.7942163293},
"Cannot calculate the first difference "
"with a window of size (3).")
exp_treatment = CategoryResults('Treatment', 0.0110675605,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('wdiff', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
rsteed11/GAT | gat/core/sna/ergm.py | 1 | 6988 | '''
Using PyMC for ERGM estimation
Tutorials from http://socialabstractions-blog.tumblr.com/post/53391947460/exponential-random-graph-models-in-python and https://gist.github.com/dmasad/78cb940de103edbee699
Author: Ryan Steed
10 July 2017
'''
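# Typical usage (a sketch; the module itself does not ship a demo):
#   G = nx.Graph(...)                    # nodes should carry an 'ontClass' attribute
#   tie_probs = probability(G)           # matrix of estimated tie probabilities
#   estimates, traces = resilience(G, iters=3000, mu=0)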
from scipy.misc import comb
import numpy as np
import pymc
import networkx as nx
import matplotlib.pyplot as plt
import os
def probability(G):
params = calc_params(G)
estimates = {coef: np.mean(trace) for coef, trace in trace(matrix=nx.to_numpy_matrix(G),params=params,iters=3000,burn=1000).items()}
estimated_coefs, estimated_term_list = create_coefs(params=params,priors=estimates)
return coefs_to_prob(term_list=estimated_term_list)
def resilience(G,iters,mu):
params = calc_params(G,type="resilience")
traces = trace(matrix=nx.to_numpy_matrix(G),params=params,iters=iters,burn=0,mu=mu)
estimates = {coef: np.mean(trace) for coef, trace in traces.items()}
return estimates, traces
def calc_params(G,type="drag"):
if type=="resilience":
# using sample params until actual param for resilience is chosen (ASPL? Eigenvector?)
return {
"aspl":aspl(G,key="W"),
}
return {
"density": edge_count(G),
"block_match": node_match(G, "ontClass"),
# 'deltaistar2': istarDelta(adjMat, 2),
# 'deltaistar3': istarDelta(adjMat, 3),
# 'deltaostar2': ostarDelta(adjMat, 2),
}
def trace(matrix,params,iters,burn,mu=0):
# using specified set of priors, create coefficients
coefs, term_list = create_coefs(params=params, priors={coef: pymc.Normal(coef, mu, 0.01) for coef in params})
@pymc.deterministic
def probs(term_list=term_list):
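        # ERGM-style logistic link: each entry of term_list is
        # (coefficient * change statistic), and the tie probability is the
        # logistic function of their sum.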
probs = 1 / (1 + np.exp(-1 * sum(term_list)))
probs = np.array([[prob if prob > 0 else 0 for prob in row] for row in probs])
probs[np.diag_indices_from(probs)] = 0
# Manually cut off the top triangle:
probs[np.triu_indices_from(probs)] = 0
return probs
# Fitting
matrix[np.triu_indices_from(matrix)] = 0
max_attempts = 50
attempts = 0
while attempts < max_attempts:
try:
outcome = pymc.Bernoulli("outcome", probs, value=matrix, observed=True)
break
except:
print("Encountered zero probability error number",attempts,", trying again...")
if attempts >= max_attempts:
raise ValueError("Something went wrong with the stochastic probabilities")
attempts += 1
sim_outcome = pymc.Bernoulli("sim_outcome", probs)
args = [outcome, sim_outcome, probs]
# density_coef, density_term,
# block_coef, block_term]
for coef, info in coefs.items():
# Add both coefficient and term for each coefficient
for item in info:
args.append(item)
model = pymc.Model(args)
mcmc = pymc.MCMC(model)
mcmc.sample(iter=iters, burn=burn, thin=50) # approx. 30 seconds
traces = diagnostics(coefs=coefs, mcmc=mcmc)
goodness_of_fit(mcmc=mcmc)
return traces
## Service functions ##
def draw_init(G):
pos = nx.spring_layout(G, k=0.075, scale=4)
fig, ax = plt.subplots(figsize=(10, 10))
nx.draw_networkx_nodes(G, pos, node_size=100, ax=ax)
nx.draw_networkx_edges(G, pos, alpha=0.5, ax=ax)
nx.draw_networkx_labels(G, pos, ax=ax)
ax.set_xlim(-0.25, 4.25)
ax.set_ylim(-0.25, 4.25)
_ = ax.axis('off')
save_ergm_file(fig, "originalGraph.png") # print to file
def create_coefs(params, priors):
coefDict = {}
term_list = []
for coef,param in params.items():
coefVal = priors.get(coef)
term = coefVal * param
# value is tuple which contains both coefficient and parameter term
coefDict[coef] = (coefVal,term)
term_list.append(term)
return coefDict, term_list
def diagnostics(coefs, mcmc):
trace = {}
fig = plt.figure(figsize=(12, 6))
i = 1
for coef in coefs:
trace[coef] = mcmc.trace(coef)[:]
print(coef+": {0:.3f}, {1:.3f}".format(np.mean(trace[coef]), np.std(trace[coef])))
ax = fig.add_subplot(220+i)
ax.plot(trace[coef])
ax.set_title(coef)
i+=1
save_ergm_file(fig, 'diagnostics')
return trace
def goodness_of_fit(mcmc):
# Goodness of fit viz
realization = mcmc.trace("sim_outcome")[-1] # Take the last one
sim_g = nx.from_numpy_matrix(realization)
pos = nx.spring_layout(sim_g, k=0.075, scale=4)
fig, ax = plt.subplots(figsize=(10, 10))
nx.draw_networkx_nodes(sim_g, pos, node_size=100, ax=ax)
nx.draw_networkx_edges(sim_g, pos, alpha=0.5, ax=ax)
nx.draw_networkx_labels(sim_g, pos, ax=ax)
ax.set_xlim(-0.25, 4.25)
ax.set_ylim(-0.25, 4.25)
_ = ax.axis('off')
save_ergm_file(fig, 'new')
def save_ergm_file(fig,name):
dir = 'out/sna/ergm/'
os.makedirs(os.path.dirname(dir), exist_ok=True)
fig.savefig(dir+name)
def coefs_to_prob(term_list):
probs = 1 / (1 + np.exp(-1 * sum(term_list))) # The logistic function
probs[np.diag_indices_from(probs)] = 0
return probs
## Covariate methods ##
def aspl(G,key):
size = len(G)
spl = np.zeros(shape=(size, size))
for i in range(size):
for j in range(size):
try:
spl[i,j] = nx.shortest_path_length(G,source=G.nodes()[i],target=G.nodes()[j]) * .2
if spl[i,j] > 1:
spl[i,j] = 1
continue
except:
spl[i,j] = 0
if not G.is_directed():
spl[np.triu_indices_from(spl)] = 0
return spl
def edge_count(G):
size = len(G)
ones = np.ones((size, size))
# Zero out the upper triangle:
if not G.is_directed():
ones[np.triu_indices_from(ones)] = 0
return ones
def node_match(G, attrib):
size = len(G)
attribs = [node[1].get(attrib) for node in G.nodes(data=True)]
match = np.zeros(shape=(size, size))
for i in range(size):
for j in range(size):
if i != j and attribs[i] == attribs[j]:
match[i, j] = 1
if not G.is_directed():
match[np.triu_indices_from(match)] = 0
return match
# functions to get delta matrices
def istarDelta(am,k):
if k == 1:
# if k == 1 then this is just density
res = np.ones(am.shape)
return(res)
res = np.zeros(am.shape,dtype=int)
n = am.shape[0]
for i in range(n):
for j in range(n):
if i!=j:
nin = am[:,j].sum()-am[i,j]
res[i,j] = comb(nin,k-1,exact=True)
return(res)
def ostarDelta(am,k):
if k == 1:
# if k == 1 then this is just density
res = np.ones(am.shape)
return(res)
res = np.zeros(am.shape,dtype=int)
n = am.shape[0]
for i in range(n):
for j in range(n):
if i!=j:
nin = am[i,:].sum()-am[i,j]
res[i,j] = comb(nin,k-1,exact=True)
return(res) | mit |
justincassidy/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
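# One possible completion of the TASKs above (a sketch, not the only valid
# solution): the imports at the top of this skeleton suggest a character
# n-gram TfidfVectorizer chained with a Perceptron classifier.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)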
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
TaxIPP-Life/Til | til/pgm/depart_retirement.py | 2 | 1083 | # -*- coding: utf-8 -*-
import sys
from numpy import maximum, array, ones
from pandas import Series
from utils import output_til_to_liam
from til.pgm.run_pension import run_pension
def depart_retirement(context, yearleg, time_step='year', to_check=False, behavior='taux_plein', cProfile=False):
    ''' This function returns a vector of booleans indicating which individuals retire.
        TODO: when retirement behaviours become more complex, create the associated .py modules.'''
if behavior == 'taux_plein':
dates_tauxplein = run_pension(context, yearleg,
time_step=time_step, to_check=to_check,
output='dates_taux_plein', cProfile=cProfile)
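        # keep, element-wise, the latest full-rate ('taux plein') date across
        # the three pension schemes (RSI, RG, FP)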
        date_tauxplein = maximum(maximum(dates_tauxplein['RSI'], dates_tauxplein['RG']),
                                 dates_tauxplein['FP'])
dates = output_til_to_liam(output_til=date_tauxplein,
index_til=dates_tauxplein['index'],
context_id=context['id'])
return dates.astype(int)
| gpl-3.0 |
zfrenchee/pandas | pandas/tests/test_compat.py | 3 | 3170 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
import pytest
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next, get_range_parameters, PY2)
class TestBuiltinIterators(object):
@classmethod
def check_result(cls, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
assert not isinstance(iter_res, list)
assert isinstance(list_res, list)
iter_res = list(iter_res)
assert len(list_res) == length
assert len(iter_res) == length
assert iter_res == exp
assert list_res == exp
def test_range(self):
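        # The trailing commas below build one-element tuples; '+=' then
        # concatenates further cases, and check_result zips over them.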
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
assert next(iterkeys({1: 2})) == 1
assert next(iteritems({1: 2})) == (1, 2)
class TestCompatFunctions(object):
@pytest.mark.parametrize(
'start,stop,step', [(0, 10, 2), (11, -2, -1), (0, -5, 1), (2, 4, 8)])
def test_get_range_parameters(self, start, stop, step):
rng = range(start, stop, step)
if PY2 and len(rng) == 0:
start_expected, stop_expected, step_expected = 0, 0, 1
elif PY2 and len(rng) == 1:
start_expected, stop_expected, step_expected = start, start + 1, 1
else:
start_expected, stop_expected, step_expected = start, stop, step
start_result, stop_result, step_result = get_range_parameters(rng)
assert start_result == start_expected
assert stop_result == stop_expected
assert step_result == step_expected
| bsd-3-clause |
UltracoldAtomsLab/labhardware | projects/signalanalyze/sweep.py | 2 | 3522 | import numpy as np
from time import sleep
import matplotlib
matplotlib.rcParams['backend'] = 'wx'
import matplotlib.pylab as pl
from time import time, strftime, sleep
import ConfigParser
import sys
import logging
sys.path.append("../../drivers")
import sr785
# Get config file
try:
configfile = sys.argv[1]
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
except IndexError:
print "No configuration file was given."
sys.exit(1)
except IOError:
print "Cannot find configuration file."
sys.exit(1)
# Load configuraion
GPIB = config.getint('Setup','gpib_num')
start_freq = config.getint('Experiment','start_freq')
stop_freq = config.getint('Experiment','stop_freq')
npoints = config.getint('Experiment','points')
linear_type = config.getboolean('Experiment','linear_type')
# Setup output file
logger = logging.getLogger()
logfile = config.get('Setup','logfile')
if logfile == 'auto':
logname = "sweep_%s" %(strftime("%y%m%d_%H%M%S"))
logfile = "%s.log" %(logname)
else:
logname = logfile
hdlr = logging.FileHandler(logfile)
formatter = logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
# Save configuration info
f = open(configfile)
for line in f:
logger.info("# %s" %line.strip())
f.close()
logger.info("#"*10)
comment = raw_input("Comment: ")
logger.info("# Comment: %s" %comment)
logger.info("#"*10)
logger.info("# Frequency (Hz), LogMagnitude (dB), Phase (deg)")
##### Actual measurement
device = sr785.StanfordSR785(GPIB)
# Turn both displays live
device.write("DISP 0,1")
device.write("DISP 1,1")
device.write("A1RG 1; A2RG 1") # turn on auto-range for both channels
device.write("MGRP 2,3") # Swept Sine measurement group p437
device.write("MEAS 2,47") # Frequency response p437
device.write("VIEW 0,0") # Log magnitude, p440
device.write("VIEW 1,6") # Phase, p440
# Set start and stop frequencies
device.write("SSTR 2,%d" %(start_freq))
device.write("SSTP 2,%d" %(stop_freq))
print "Start frequency:", device.ask("SSTR ? 0")
print "Stop frequency :", device.ask("SSTP ? 0")
device.write("SRPT 2,0") # single shot experiment, p432
stype = 0 if linear_type else 1
device.write("SSTY 2,%d" %(stype))
device.write("SNPS 2,%d" %(npoints)) # set the number of points swewpt, p433
if linear_type:
f = np.linspace(start_freq, stop_freq, npoints)
else: # Logarithmic type
f = np.logspace(np.log10(start_freq), np.log10(stop_freq), npoints)
f = f.reshape(npoints, 1) # make column vector
device.ask("DSPS?") # Clear display messages
device.write("STRT") # start scan
dataa, datab = False, False
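# Poll the display status word until both displays report completion
# ('SSA'/'SSB' are taken here to mean the sweep has finished on display A/B).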
now = time()
try:
while not dataa or not datab:
sleep(1)
res = device.display_status_word(int(device.ask("DSPS ?")))
codes = res[1]
if 'SSA' in codes:
dataa = True
if 'SSB' in codes:
datab = True
print "%.1f seconds..." %(time()-now)
except KeyboardInterrupt:
sys.exit(0)
# If finished, get and save data
data = device.getdata(2)
out = np.concatenate((f, data),axis=1)
outfile = file(logfile, 'a')
np.savetxt(outfile, out, fmt="%.4e", delimiter=',')
outfile.close()
# Do plotting
pl.subplot(211)
pl.semilogx(f, data[:, 0])
pl.xlabel("Frequency (Hz)")
pl.ylabel("LogMagnitude (dB)")
pl.xlim([f[0], f[-1]])
pl.title(comment)
pl.subplot(212)
pl.semilogx(f, data[:, 1])
pl.xlabel("Frequency (Hz)")
pl.ylabel("Phase (deg)")
pl.xlim([f[0], f[-1]])
pl.savefig("%s.png" %(logname))
pl.show()
| mit |
sunilthorat09/code-for-blog | 2009/qt_mpl_bars.py | 19 | 7196 | """
This demo demonstrates how to embed a matplotlib (mpl) plot
into a PyQt4 GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich PyQt GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 19.01.2009
"""
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.textbox.setText('1 2 3 4')
self.on_draw()
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ A demo of using PyQt with matplotlib:
* Use the matplotlib navigation bar
* Add values to the text box and press Enter (or click "Draw")
* Show or hide the grid
* Drag the slider to modify the width of the bars
* Save the plot to a file using the File menu
* Click on a bar to receive an informative message
"""
QMessageBox.about(self, "About the demo", msg.strip())
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
QMessageBox.information(self, "Click!", msg)
def on_draw(self):
""" Redraws the figure
"""
str = unicode(self.textbox.text())
self.data = map(int, str.split())
x = range(len(self.data))
# clear the axes and redraw the plot anew
#
self.axes.clear()
self.axes.grid(self.grid_cb.isChecked())
self.axes.bar(
left=x,
height=self.data,
width=self.slider.value() / 100.0,
align='center',
alpha=0.44,
picker=5)
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
self.textbox = QLineEdit()
self.textbox.setMinimumWidth(200)
self.connect(self.textbox, SIGNAL('editingFinished ()'), self.on_draw)
self.draw_button = QPushButton("&Draw")
self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
self.grid_cb = QCheckBox("Show &Grid")
self.grid_cb.setChecked(False)
self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
slider_label = QLabel('Bar width (%):')
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(1, 100)
self.slider.setValue(20)
self.slider.setTracking(True)
self.slider.setTickPosition(QSlider.TicksBothSides)
self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
#
# Layout with box sizers
#
hbox = QHBoxLayout()
for w in [ self.textbox, self.draw_button, self.grid_cb,
slider_label, self.slider]:
hbox.addWidget(w)
hbox.setAlignment(w, Qt.AlignVCenter)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(hbox)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("This is a demo")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| unlicense |
bloyl/mne-python | tutorials/intro/70_report.py | 3 | 15256 | """
.. _tut-report:
Getting started with ``mne.Report``
===================================
`mne.Report` is a way to create interactive HTML summaries of your data. These
reports can show many different visualizations of one subject's data. A common
use case is creating diagnostic summaries to check data quality at different
stages in the processing pipeline. The report can show things like plots of
data before and after each preprocessing step, epoch rejection statistics, MRI
slices with overlaid BEM shells, all the way up to plots of estimated cortical
activity.
Compared to a Jupyter notebook, `mne.Report` is easier to deploy (the HTML
pages it generates are self-contained and do not require a running Python
environment) but less flexible (you can't change code and re-run something
directly within the browser). This tutorial covers the basics of building a
`~mne.Report`. As usual we'll start by importing the modules we need:
"""
import os
import matplotlib.pyplot as plt
import mne
###############################################################################
# Before getting started with :class:`mne.Report`, make sure the files you want
# to render follow the filename conventions defined by MNE:
#
# .. cssclass:: table-bordered
# .. rst-class:: midvalign
#
# =================================== =========================================
# Data object Filename convention (ends with)
# =================================== =========================================
# `~mne.io.Raw` ``-raw.fif(.gz)``, ``-raw_sss.fif(.gz)``,
# ``-raw_tsss.fif(.gz)``,
# ``_meg.fif(.gz)``, ``_eeg.fif(.gz)``,
# ``_ieeg.fif(.gz)``
# events ``-eve.fif(.gz)``
# `~mne.Epochs` ``-epo.fif(.gz)``
# `~mne.Evoked` ``-ave.fif(.gz)``
# `~mne.Covariance` ``-cov.fif(.gz)``
# `~mne.Projection` ``-proj.fif(.gz)``
# `~mne.transforms.Transform` ``-trans.fif(.gz)``
# `~mne.Forward` ``-fwd.fif(.gz)``
# `~mne.minimum_norm.InverseOperator` ``-inv.fif(.gz)``
# =================================== =========================================
#
# Alternatively, the dash ``-`` in the filename may be replaced with an
# underscore ``_``.
#
# Basic reports
# ^^^^^^^^^^^^^
#
# The basic process for creating an HTML report is to instantiate the
# :class:`~mne.Report` class, then use the :meth:`~mne.Report.parse_folder`
# method to select particular files to include in the report. Which files are
# included depends on both the ``pattern`` parameter passed to
# :meth:`~mne.Report.parse_folder` and also the ``subject`` and
# ``subjects_dir`` parameters provided to the :class:`~mne.Report` constructor.
#
# .. sidebar:: Viewing the report
#
# On successful creation of the report, the :meth:`~mne.Report.save` method
# will open the HTML in a new tab in the browser. To disable this, use the
# ``open_browser=False`` parameter of :meth:`~mne.Report.save`.
#
# For our first example, we'll generate a barebones report for all the
# :file:`.fif` files containing raw data in the sample dataset, by passing the
# pattern ``*raw.fif`` to :meth:`~mne.Report.parse_folder`. We'll omit the
# ``subject`` and ``subjects_dir`` parameters from the :class:`~mne.Report`
# constructor, but we'll also pass ``render_bem=False`` to the
# :meth:`~mne.Report.parse_folder` method — otherwise we would get a warning
# about not being able to render MRI and ``trans`` files without knowing the
# subject.
path = mne.datasets.sample.data_path(verbose=False)
report = mne.Report(verbose=True)
report.parse_folder(path, pattern='*raw.fif', render_bem=False)
report.save('report_basic.html', overwrite=True)
###############################################################################
# This report yields a textual summary of the :class:`~mne.io.Raw` files
# selected by the pattern. For a slightly more useful report, we'll ask for the
# power spectral density of the :class:`~mne.io.Raw` files, by passing
# ``raw_psd=True`` to the :class:`~mne.Report` constructor. We'll also
# visualize the SSP projectors stored in the raw data's `~mne.Info` dictionary
# by setting ``projs=True``. Lastly, let's also refine our pattern to select
# only the filtered raw recording (omitting the unfiltered data and the
# empty-room noise recordings):
pattern = 'sample_audvis_filt-0-40_raw.fif'
report = mne.Report(raw_psd=True, projs=True, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_raw_psd.html', overwrite=True)
###############################################################################
# The sample dataset also contains SSP projectors stored as *individual files*.
# To add them to a report, we also have to provide the path to a file
# containing an `~mne.Info` dictionary, from which the channel locations can be
# read.
info_fname = os.path.join(path, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
pattern = 'sample_audvis_*proj.fif'
report = mne.Report(info_fname=info_fname, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_proj.html', overwrite=True)
###############################################################################
# This time we'll pass a specific ``subject`` and ``subjects_dir`` (even though
# there's only one subject in the sample dataset) and remove our
# ``render_bem=False`` parameter so we can see the MRI slices, with BEM
# contours overlaid on top if available. Since this is computationally
# expensive, we'll also pass the ``mri_decim`` parameter for the benefit of our
# documentation servers, and skip processing the :file:`.fif` files:
subjects_dir = os.path.join(path, 'subjects')
report = mne.Report(subject='sample', subjects_dir=subjects_dir, verbose=True)
report.parse_folder(path, pattern='', mri_decim=25)
report.save('report_mri_bem.html', overwrite=True)
###############################################################################
# Now let's look at how :class:`~mne.Report` handles :class:`~mne.Evoked` data
# (we will skip the MRIs to save computation time). The following code will
# produce butterfly plots, topomaps, and comparisons of the global field
# power (GFP) for different experimental conditions.
pattern = 'sample_audvis-no-filter-ave.fif'
report = mne.Report(verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked.html', overwrite=True)
###############################################################################
# You have probably noticed that the EEG recordings look particularly odd. This
# is because by default, `~mne.Report` does not apply baseline correction
# before rendering evoked data. So if the dataset you wish to add to the report
# has not been baseline-corrected already, you can request baseline correction
# here. The MNE sample dataset we're using in this example has **not** been
# baseline-corrected; so let's do this now for the report!
#
# To request baseline correction, pass a ``baseline`` argument to
# `~mne.Report`, which should be a tuple with the starting and ending time of
# the baseline period. For more details, see the documentation on
# `~mne.Evoked.apply_baseline`. Here, we will apply baseline correction for a
# baseline period from the beginning of the time interval to time point zero.
baseline = (None, 0)
pattern = 'sample_audvis-no-filter-ave.fif'
report = mne.Report(baseline=baseline, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked_baseline.html', overwrite=True)
###############################################################################
# To render whitened :class:`~mne.Evoked` files with baseline correction, pass
# the ``baseline`` argument we just used, and add the noise covariance file.
# This will display ERP/ERF plots for both the original and whitened
# :class:`~mne.Evoked` objects, but scalp topomaps only for the original.
cov_fname = os.path.join(path, 'MEG', 'sample', 'sample_audvis-cov.fif')
baseline = (None, 0)
report = mne.Report(cov_fname=cov_fname, baseline=baseline, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked_whitened.html', overwrite=True)
###############################################################################
# If you want to actually *view* the noise covariance in the report, make sure
# it is captured by the pattern passed to :meth:`~mne.Report.parse_folder`, and
# also include a source for an :class:`~mne.Info` object (any of the
# :class:`~mne.io.Raw`, :class:`~mne.Epochs` or :class:`~mne.Evoked`
# :file:`.fif` files that contain subject data also contain the measurement
# information and should work):
pattern = 'sample_audvis-cov.fif'
info_fname = os.path.join(path, 'MEG', 'sample', 'sample_audvis-ave.fif')
report = mne.Report(info_fname=info_fname, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_cov.html', overwrite=True)
###############################################################################
# Adding custom plots to a report
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The Python interface has greater flexibility compared to the :ref:`command
# line interface <mne report>`. For example, custom plots can be added via
# the :meth:`~mne.Report.add_figs_to_section` method:
report = mne.Report(verbose=True)
fname_raw = os.path.join(path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(fname_raw, verbose=False).crop(tmax=60)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'buttonpress': 32}
# create some epochs and ensure we drop a few, so we can then plot the drop log
reject = dict(eeg=150e-6)
epochs = mne.Epochs(raw=raw, events=events, event_id=event_id,
tmin=-0.2, tmax=0.7, reject=reject, preload=True)
fig_drop_log = epochs.plot_drop_log(subject='sample', show=False)
# now also plot an evoked response
evoked_aud_left = epochs['auditory/left'].average()
fig_evoked = evoked_aud_left.plot(spatial_colors=True, show=False)
# add the custom plots to the report:
report.add_figs_to_section([fig_drop_log, fig_evoked],
captions=['Dropped Epochs',
'Evoked: Left Auditory'],
section='drop-and-evoked')
report.save('report_custom.html', overwrite=True)
###############################################################################
# Adding a slider
# ^^^^^^^^^^^^^^^
#
# Sliders provide an intuitive way for users to interactively browse a
# predefined set of images. You can add sliders via
# :meth:`~mne.Report.add_slider_to_section`:
report = mne.Report(verbose=True)
figs = list()
times = evoked_aud_left.times[::30]
for t in times:
figs.append(evoked_aud_left.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='png') # can also use 'svg'
report.save('report_slider.html', overwrite=True)
###############################################################################
# Adding coregistration plot to a report
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now we see how :class:`~mne.Report` can plot coregistration results. This is
# very useful to check the quality of the :term:`trans` coregistration file
# that allows to align anatomy and MEG sensors.
report = mne.Report(info_fname=info_fname, subject='sample',
subjects_dir=subjects_dir, verbose=True)
pattern = "sample_audvis_raw-trans.fif"
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_coreg.html', overwrite=True)
###############################################################################
# Adding ``SourceEstimate`` (STC) plot to a report
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now we see how :class:`~mne.Report` handles :class:`~mne.SourceEstimate`
# data. The following will produce a :term:`STC` plot with vertex
# time courses. In this scenario, we also demonstrate how to use the
# :meth:`mne.viz.Brain.screenshot` method to save the figs in a slider.
report = mne.Report(verbose=True)
fname_stc = os.path.join(path, 'MEG', 'sample', 'sample_audvis-meg')
stc = mne.read_source_estimate(fname_stc, subject='sample')
figs = list()
kwargs = dict(subjects_dir=subjects_dir, initial_time=0.13,
clim=dict(kind='value', lims=[3, 6, 9]))
for hemi in ('lh', 'rh'):
brain = stc.plot(hemi=hemi, **kwargs)
brain.toggle_interface(False)
figs.append(brain.screenshot(time_viewer=True))
brain.close()
# add the stc plot to the report:
report.add_slider_to_section(figs)
report.save('report_stc.html', overwrite=True)
###############################################################################
# Managing report sections
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# The MNE report command internally manages the sections so that plots
# belonging to the same section are rendered consecutively. Within a section,
# the plots are ordered in the same order that they were added using the
# :meth:`~mne.Report.add_figs_to_section` command. Each section is identified
# by a toggle button in the top navigation bar of the report which can be used
# to show or hide the contents of the section. To toggle the show/hide state of
# all sections in the HTML report, press :kbd:`t`, or press the toggle-all
# button in the upper right.
#
# .. sidebar:: Structure
#
# Although we've been generating separate reports in each of these examples,
# you could easily create a single report for all :file:`.fif` files (raw,
# evoked, covariance, etc) by passing ``pattern='*.fif'``.
#
#
# Editing a saved report
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Saving to HTML is a write-only operation, meaning that we cannot read an
# ``.html`` file back as a :class:`~mne.Report` object. In order to be able
# to edit a report once it's no longer in-memory in an active Python session,
# save it as an HDF5 file instead of HTML:
report.save('report.h5', overwrite=True)
report_from_disk = mne.open_report('report.h5')
print(report_from_disk)
###############################################################################
# This allows the possibility of multiple scripts adding figures to the same
# report. To make this even easier, :class:`mne.Report` can be used as a
# context manager:
with mne.open_report('report.h5') as report:
report.add_figs_to_section(fig_evoked,
captions='Left Auditory',
section='evoked',
replace=True)
report.save('report_final.html', overwrite=True)
###############################################################################
# With the context manager, the updated report is also automatically saved
# back to :file:`report.h5` upon leaving the block.
| bsd-3-clause |
droundy/deft | papers/fuzzy-fmt/figs/erf-wca-error.py | 1 | 1588 | from __future__ import division
from numpy import *
import matplotlib.pyplot as plt
from scipy.special import erf
min_temp = 0.1
max_temp = 1.3
eps = 1
R = 1
n = 0.5
sigma = R*2**(5.0/6.0)
Temps = arange(min_temp, max_temp, 0.1)
alphas = sigma*(2/(1 + sqrt(Temps *log(2)/eps)))**(1.0/6.0)
Xis = alphas/(6*sqrt(pi)*(log(2) + sqrt(Temps *log(2)/eps)))
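# alphas and Xis parameterize the soft erf potential V_erf below; they appear
# to be chosen so that V_erf approximates the WCA potential V_wca at each
# temperature (the comparison is exactly what the two figures integrate).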
N = 1000
def V_wca(r):
if r < 2*R:
return 4*eps*((sigma/r)**12 - (sigma/r)**6) + eps
else:
return 0
def V_erf(r, kT, a, X):
return -kT*log((erf((r-a)/X)+1)/2)
plt.figure(1)
for i in range(len(Temps)):
x = linspace(2*R, alphas[i]*1.5, N)
dx = (1.3*alphas[i]-2*R)/N
error = zeros_like(x)
for j in arange(1, len(x)):
        error[j] = error[j-1] + 4*pi*n*(V_erf(x[j], Temps[i], alphas[i], Xis[i]))*x[j]**2*dx
plt.plot(x, error, label = r'$kT/\epsilon = %g$' % Temps[i] )
plt.xlim(2*R, 2.4)
plt.ylabel("$\delta E/\epsilon$")
plt.xlabel('$r/R$')
plt.title('Integrated difference $V_{wca}$ and $V_{erf}$ for r > 2R')
plt.legend(loc='best')
plt.figure(2)
for i in range(len(Temps)):
x = linspace(alphas[i], alphas[i]*1.3, N)
dx = alphas[i]*(1.3-1)/N
error = zeros_like(x)
for j in arange(1, len(x)):
        error[j] = error[j-1] + 4*pi*n**2*(V_wca(x[j]) - V_erf(x[j], Temps[i], alphas[i], Xis[i]))*x[j]**2*dx
plt.plot(x, error, label = r'$kT/\epsilon = %g$' % Temps[i] )
plt.xlim(alphas[-1], alphas[0]*1.15)
plt.ylabel("$\delta E/\epsilon$")
plt.xlabel('$r/R$')
plt.title('Integrated difference between $V_{wca}$ and $V_{erf}$')
plt.legend(loc='best')
plt.show()
| gpl-2.0 |
rezoo/chainer | chainer/training/extensions/plot_report.py | 4 | 6421 | import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class PlotReport(extension.Extension):
"""Trainer extension to output plots.
This extension accumulates the observations of the trainer to
:class:`~chainer.DictSummary` at a regular interval specified by a supplied
    trigger, and plots a graph using them.
There are two triggers to handle this extension. One is the trigger to
invoke this extension, which is used to handle the timing of accumulating
the results. It is set to ``1, 'iteration'`` by default. The other is the
trigger to determine when to emit the result. When this trigger returns
True, this extension appends the summary of accumulated values to the list
of past summaries, and writes the list to the log file. Then, this
extension makes a new fresh summary object which is used until the next
time that the trigger fires.
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
.. warning::
If your environment needs to specify a backend of matplotlib
explicitly, please call ``matplotlib.use`` before calling
``trainer.run``. For example:
.. code-block:: python
import matplotlib
matplotlib.use('Agg')
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.run()
Then, once one of instances of this extension is called,
``matplotlib.use`` will have no effect.
For the details, please see here:
https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
None, nothing is output to the graph.
x_key (str): Keys of values regarded as x. The default value is
'iteration'.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Figure
object, Axes object, and all plot data are passed to this callback
in this order. This callback can modify the figure.
file_name (str): Name of the figure file under the output directory.
It can be a format string.
marker (str): The marker used to plot the graph. Default is ``'x'``. If
``None`` is given, it draws with no markers.
grid (bool): Set the axis grid on if True. Default is True.
"""
def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
postprocess=None, file_name='plot.png', marker='x',
grid=True):
_check_available()
self._x_key = x_key
if isinstance(y_keys, str):
y_keys = (y_keys,)
self._y_keys = y_keys
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = file_name
self._marker = marker
self._grid = grid
self._postprocess = postprocess
self._init_summary()
self._data = {k: [] for k in y_keys}
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if _available:
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
keys = self._y_keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if self._trigger(trainer):
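            # The interval trigger fired: average the values accumulated since
            # the last output, record an (x, y) point for each key, and redraw
            # the figure.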
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
x = stats_cpu[self._x_key]
data = self._data
for k in keys:
if k in stats_cpu:
data[k].append((x, stats_cpu[k]))
f = plt.figure()
a = f.add_subplot(111)
a.set_xlabel(self._x_key)
if self._grid:
a.grid()
for k in keys:
xy = data[k]
if len(xy) == 0:
continue
xy = numpy.array(xy)
a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
if a.has_data():
if self._postprocess is not None:
self._postprocess(f, a, summary)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(path.join(trainer.out, self._file_name),
bbox_extra_artists=(l,), bbox_inches='tight')
plt.close()
self._init_summary()
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
serializer('_plot_{}'.format(self._file_name),
json.dumps(self._data))
else:
self._data = json.loads(
serializer('_plot_{}'.format(self._file_name), ''))
def _init_summary(self):
self._summary = reporter.DictSummary()
| mit |
alorenzo175/pvlib-python | pvlib/test/test_srml.py | 2 | 2888 | import inspect
import os
from numpy import isnan
import pandas as pd
from pandas.util.testing import network
import pytest
from pvlib.iotools import srml
test_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
srml_testfile = os.path.join(test_dir, '../data/SRML-day-EUPO1801.txt')
def test_read_srml():
srml.read_srml(srml_testfile)
@network
def test_read_srml_remote():
srml.read_srml('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt')
def test_read_srml_columns_exist():
data = srml.read_srml(srml_testfile)
assert 'ghi_0' in data.columns
assert 'ghi_0_flag' in data.columns
assert 'dni_1' in data.columns
assert 'dni_1_flag' in data.columns
assert '7008' in data.columns
assert '7008_flag' in data.columns
def test_read_srml_nans_exist():
data = srml.read_srml(srml_testfile)
assert isnan(data['dni_0'][1119])
assert data['dni_0_flag'][1119] == 99
@pytest.mark.parametrize('url,year,month', [
('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt',
2018, 1),
('http://solardat.uoregon.edu/download/Archive/EUPO1612.txt',
2016, 12),
])
def test_read_srml_dt_index(url, year, month):
data = srml.read_srml(url)
start = pd.Timestamp('{:04d}{:02d}01 00:00'.format(year, month))
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp('{:04d}{:02d}31 23:59'.format(year, month))
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index[59::60].minute == 59).all()
assert str(year) not in data.columns
@pytest.mark.parametrize('column,expected', [
('1001', 'ghi_1'),
('7324', '7324'),
('2001', '2001'),
('2017', 'dni_7')
])
def test_map_columns(column, expected):
assert srml.map_columns(column) == expected
@network
def test_read_srml_month_from_solardat():
url = 'http://solardat.uoregon.edu/download/Archive/EUPO1801.txt'
file_data = srml.read_srml(url)
requested = srml.read_srml_month_from_solardat('EU', 2018, 1)
assert file_data.equals(requested)
@network
def test_15_minute_dt_index():
data = srml.read_srml_month_from_solardat('TW', 2019, 4, 'RQ')
start = pd.Timestamp('20190401 00:00')
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp('20190430 23:45')
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index[3::4].minute == 45).all()
@network
def test_hourly_dt_index():
data = srml.read_srml_month_from_solardat('CD', 1986, 4, 'PH')
start = pd.Timestamp('19860401 00:00')
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp('19860430 23:00')
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index.minute == 0).all()
| bsd-3-clause |
nmayorov/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
CVML/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
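        # Lee & Seung multiplicative updates:
        #   H *= (W.T @ V) / (W.T @ W @ H),  W *= (V @ H.T) / (W @ (H @ H.T));
        # eps guards against division by zero.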
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
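# Example (a sketch, not part of the original benchmark):
#   W, H = alt_nnmf(np.abs(np.random.randn(50, 40)), r=5, tol=1e-3)
#   approximation = W.dot(H)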
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
zhmz90/first_step_with_julia_kaggle.jl | data/data/knn_python_custom_tutorial.py | 3 | 1873 | # Loading Data
import pandas as pd
from skimage.io import imread
import numpy as np
def read_data(typeData, labelsInfo, imageSize, path):
#Intialize x matrix
x = np.zeros((labelsInfo.shape[0], imageSize))
for (index, idImage) in enumerate(labelsInfo["ID"]):
#Read image file
nameFile = "{0}/{1}Resized/{2}.Bmp".format(path, typeData, idImage)
img = imread(nameFile, as_grey=True)
x[index, :] = np.reshape(img, (1, imageSize))
return x
imageSize = 400 # 20 x 20 pixels
#Set location of data files , folders
path = ...
labelsInfoTrain = pd.read_csv("{0}/trainLabels.csv".format(path))
#Read training matrix
xTrain = read_data("train", labelsInfoTrain, imageSize, path)
#Read information about test data ( IDs ).
labelsInfoTest = pd.read_csv("{0}/sampleSubmission.csv".format(path))
#Read test matrix
xTest = read_data("test", labelsInfoTest, imageSize, path)
yTrain = map(ord, labelsInfoTrain["Class"])
# Defining main functions
def euclidean_distance (a, b):
dif = a - b
return dif.dot(dif)
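# Editor's note (hedged): euclidean_distance above returns the *squared*
# Euclidean distance (no square root), which is enough for nearest-neighbour
# ranking because the square root is monotonic. A tiny sanity check:
assert euclidean_distance(np.array([0.0, 3.0]), np.array([4.0, 0.0])) == 25.0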
def get_k_nearest_neighbors(x, i, k):
imageI = x[i,:]
distances = [euclidean_distance(imageI, x[j,:]) for j in xrange(x.shape[0])]
sortedNeighbors = np.argsort(distances)
kNearestNeighbors = sortedNeighbors[1:(k+1)]
return kNearestNeighbors
def assign_label(x, y, k, i):
kNearestNeighbors = get_k_nearest_neighbors(x, i, k)
counts = {}
highestCount = 0
mostPopularLabel = 0
for n in kNearestNeighbors:
labelOfN = y[n]
if labelOfN not in counts :
counts[labelOfN] = 0
counts[labelOfN] += 1
if counts[labelOfN] > highestCount :
highestCount = counts[labelOfN]
mostPopularLabel = labelOfN
return mostPopularLabel
# Running LOO-CV (leave-one-out cross-validation) with 1NN sequentially
import time
start = time.time()
k=1
yPredictions = [assign_label(xTrain, yTrain, k, i) for i in xrange(xTrain.shape[0])]
print time.time() - start, "seconds elapsed"
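# Editor's sketch (hedged, not part of the original tutorial): the per-point
# loop above can be replaced by a vectorised computation of all pairwise
# squared distances, using ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. This
# assumes the full n-by-n distance matrix fits in memory.
sqNorms = (xTrain ** 2).sum(axis=1)
pairwiseSqDists = sqNorms[:, None] + sqNorms[None, :] - 2 * xTrain.dot(xTrain.T)
nearestNeighbor = np.argsort(pairwiseSqDists, axis=1)[:, 1]  # column 0 is the point itself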
| mit |
kwilliams-mo/iris | docs/iris/example_code/graphics/hovmoller.py | 3 | 1393 | """
Hovmoller diagram of monthly surface temperature
================================================
This example demonstrates the creation of a Hovmoller diagram with fine control over plot ticks and labels.
The data comes from the Met Office OSTIA project and has been pre-processed to calculate the monthly mean sea
surface temperature.
"""
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import iris
import iris.quickplot as qplt
import iris.unit
def main():
fname = iris.sample_data_path('ostia_monthly.nc')
# load a single cube of surface temperature between +/- 5 latitude
cube = iris.load_cube(fname, iris.Constraint('surface_temperature', latitude=lambda v: -5 < v < 5))
# Take the mean over latitude
cube = cube.collapsed('latitude', iris.analysis.MEAN)
# Now that we have our data in a nice way, lets create the plot
# contour with 20 levels
qplt.contourf(cube, 20)
# Put a custom label on the y axis
plt.ylabel('Time / years')
# Stop matplotlib providing clever axes range padding
plt.axis('tight')
# As we are plotting annual variability, put years as the y ticks
plt.gca().yaxis.set_major_locator(mdates.YearLocator())
# And format the ticks to just show the year
plt.gca().yaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.show()
if __name__ == '__main__':
main()
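# Editor's sketch (hedged): the locator/formatter pattern used in main(), shown
# on a plain matplotlib date axis so it can be tried without the Iris sample
# data. The dates below are made up.
#
#     import datetime
#     fig, ax = plt.subplots()
#     dates = [datetime.date(2000 + i, 1, 1) for i in range(5)]
#     ax.plot(dates, range(5))
#     ax.xaxis.set_major_locator(mdates.YearLocator())
#     ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
#     plt.show()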
| gpl-3.0 |
joernhees/scikit-learn | sklearn/linear_model/sag.py | 30 | 12959 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared", "multinomial"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
# Lipschitz constant for squared loss (the step size is its inverse)
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
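# Editor's worked example (hedged; the numbers are illustrative, not taken
# from any dataset). For the squared loss the code above gives
# L = max_squared_sum + int(fit_intercept) + alpha_scaled and step = 1 / L:
#
#     >>> get_auto_step_size(4.0, 0.5, 'squared', True)  # 1 / (4.0 + 1 + 0.5)
#     0.18181818181818182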
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criterion is not reached. Defaults to 1000.
tol : double, optional
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
better in the first epochs, and allows for l1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
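# Editor's sketch (hedged): sag_solver is normally driven by Ridge or
# LogisticRegression, but it can be exercised directly on toy data; the shapes
# below are illustrative only.
#
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(20, 3)
#     >>> y = rng.randn(20)
#     >>> coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1.)
#     >>> coef.shape
#     (3,)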
| bsd-3-clause |
samyachour/EKG_Analysis | plot.py | 1 | 2111 | import matplotlib.pyplot as plt
def plot(y, title="Signal", xLab="Index * 0.003s", yLab="mV", size=(9.7,6)):
fig = plt.figure(figsize=size) # I used figures to customize size
ax = fig.add_subplot(111)
ax.plot(y)
ax.set_title(title)
# fig.savefig('/Users/samy/Downloads/{0}.png'.format(self.name))
ax.set_ylabel(yLab)
ax.set_xlabel(xLab)
plt.show()
def plotBaseline(y, baseline=0, title="Signal", xLab="Index * 0.003s", yLab="mV", size=(9.7,6)):
fig = plt.figure(figsize=size) # I used figures to customize size
ax = fig.add_subplot(111)
ax.plot(y)
ax.set_title(title)
# fig.savefig('/Users/samy/Downloads/{0}.png'.format(self.name))
plt.axhline(y=baseline)
ax.set_ylabel(yLab)
ax.set_xlabel(xLab)
plt.show()
def multiplot(data, graph_names):
#plot multiple lines in one graph
# input:
# data = list of data to plot
# graph_names = list of record names to show in the legend
for l in data:
plt.plot(l)
plt.legend(graph_names)
plt.show()
def plotRPeaks(signal):
fig = plt.figure(figsize=(9.7, 6)) # I used figures to customize size
ax = fig.add_subplot(111)
ax.plot(signal.data)
# ax.axhline(self.baseline)
ax.plot(*zip(*signal.RPeaks), marker='o', color='r', ls='')
ax.set_title(signal.name)
# fig.savefig('/Users/samy/Downloads/{0}.png'.format(self.name))
plt.show()
def plotCoords(data, coords):
fig = plt.figure(figsize=(9.7, 6)) # I used figures to customize size
ax = fig.add_subplot(111)
ax.plot(data)
ax.plot(*zip(*coords), marker='o', color='r', ls='')
plt.show()
def plotBins(bins, recordTitle=""):
fig = plt.figure(figsize=(9.7, 6))
ax = fig.add_subplot(111)
rects1 = ax.bar(0.5, bins[0], color='r')
rects2 = ax.bar(1.5, bins[1], color='b')
rects3 = ax.bar(2.5, bins[2], color='g')
ax.legend((rects1[0], rects2[0], rects3[0]), ('bin 1', 'bin 2', 'bin 3'))
ax.set_ylabel('Bin percent')
ax.set_xlabel('Bins')
ax.set_title('RR Interval bins' + recordTitle)
plt.show()
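# Editor's usage sketch (hedged, not part of the original module): these
# helpers are thin wrappers around matplotlib, so they can be smoke-tested
# with synthetic data, e.g.:
#
#     import numpy as np
#     plot(np.sin(np.linspace(0, 20, 2000)), title="Synthetic signal")
#     plotBins([0.6, 0.3, 0.1], recordTitle=" (example record)")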
| gpl-3.0 |
cpcloud/odo | odo/backends/tests/test_hdfs.py | 9 | 9782 | from __future__ import absolute_import, division, print_function
import pytest
import os
pywebhdfs = pytest.importorskip('pywebhdfs')
pyhive = pytest.importorskip('pyhive')
host = os.environ.get('HDFS_TEST_HOST')
pytestmark = pytest.mark.skipif(host is None,
reason='No HDFS_TEST_HOST envar defined')
from pywebhdfs.webhdfs import PyWebHdfsClient
import pandas as pd
import numpy as np
import uuid
from odo.backends.hdfs import discover, HDFS, CSV, SSH, dialect_of, TableProxy
from odo.backends.sql import resource
from odo.backends.ssh import sftp
from odo import into, drop, JSONLines, odo
from odo.utils import filetext, ignoring, tmpfile
import sqlalchemy as sa
from datashape import dshape
from odo.directory import Directory
from contextlib import contextmanager
hdfs = PyWebHdfsClient(host=host, port='14000', user_name='hdfs')
ds = dshape('var * {id: ?int64, name: ?string, amount: ?int64}')
engine = resource('hive://hdfs@%s:10000/default' % host)
accounts_1_csv = """
id,name,amount
1,Alice,100
2,Bob,200
3,Charlie,300
4,Dan,400
5,Edith,500""".strip()
accounts_2_csv = """
id,name,amount
6,Frank,600
7,George,700
8,Hannah,800
""".strip()
accounts_3_csv = """
id,name,amount
9,Isaac,900
10,Jane,1000
""".strip()
@contextmanager
def accounts_data():
a = '/user/hive/test/accounts/accounts.1.csv'
b = '/user/hive/test/accounts/accounts.2.csv'
c = '/user/hive/test/accounts.3.csv'
hdfs.make_dir('user/hive/test/accounts')
hdfs.create_file(a.lstrip('/'), accounts_1_csv)
hdfs.create_file(b.lstrip('/'), accounts_2_csv)
hdfs.create_file(c.lstrip('/'), accounts_3_csv)
A = HDFS(CSV)(a, hdfs=hdfs)
B = HDFS(CSV)(b, hdfs=hdfs)
C = HDFS(CSV)(c, hdfs=hdfs)
directory = HDFS(Directory(CSV))('/user/hive/test/accounts/', hdfs=hdfs)
try:
yield (directory, (A, B, C))
finally:
hdfs.delete_file_dir(a)
hdfs.delete_file_dir(b)
hdfs.delete_file_dir(c)
@contextmanager
def accounts_ssh():
""" Three csv files on the remote host in a directory """
dirname = str(uuid.uuid1())
conn = sftp(**auth)
conn.mkdir(dirname)
with filetext(accounts_1_csv) as fn:
conn.put(fn, dirname + '/accounts.1.csv')
with filetext(accounts_2_csv) as fn:
conn.put(fn, dirname + '/accounts.2.csv')
with filetext(accounts_3_csv) as fn:
conn.put(fn, dirname + '/accounts.3.csv')
filenames = [dirname + '/accounts.%d.csv' % i for i in [1, 2, 3]]
uris = ['ssh://ubuntu@%s:%s' % (host, fn) for fn in filenames]
try:
yield 'ssh://ubuntu@%s:%s/*.csv' % (host, dirname), uris
finally:
for fn in filenames:
conn.remove(fn)
conn.rmdir(dirname)
def test_discover():
with accounts_data() as (directory, (a, b, c)):
assert str(discover(a)).replace('?', '') == \
'var * {id: int64, name: string, amount: int64}'
assert str(discover(directory)).replace('?', '') == \
'var * {id: int64, name: string, amount: int64}'
@contextmanager
def tmpfile_hdfs(ext=''):
fn = str(uuid.uuid1())
if ext:
fn = fn + '.' + ext
try:
yield fn
finally:
hdfs.delete_file_dir(fn)
def test_copy_local_files_to_hdfs():
with tmpfile_hdfs() as target:
with filetext('name,amount\nAlice,100\nBob,200') as source:
csv = CSV(source)
scsv = HDFS(CSV)(target, hdfs=hdfs)
into(scsv, csv, blocksize=10) # 10 bytes per message
assert discover(scsv) == discover(csv)
def test_copy_hdfs_files_locally():
with tmpfile('csv') as target:
with accounts_data() as (d, (a, b, c)):
csv = into(target, a)
with open(csv.path) as f:
assert f.read().strip() == accounts_1_csv
def test_copy_hdfs_data_into_memory():
with accounts_data() as (d, (a, b, c)):
assert into(list, a)
def test_HDFS_constructor_allows_user_alternatives():
r = HDFS(CSV)('foo.csv', username='alice', host='host')
assert r.hdfs.user_name == 'alice'
def test_hdfs_resource():
r = resource('hdfs://user@hostname:1234:/path/to/myfile.json')
assert isinstance(r, HDFS(JSONLines))
assert r.hdfs.user_name == 'user'
assert r.hdfs.host == 'hostname'
assert r.hdfs.port == '1234'
assert r.path == '/path/to/myfile.json'
assert isinstance(resource('hdfs://path/to/myfile.csv',
host='host', user='user', port=1234),
HDFS(CSV))
assert isinstance(resource('hdfs://path/to/*.csv',
host='host', user='user', port=1234),
HDFS(Directory(CSV)))
def normalize(s):
return ' '.join(s.split())
auth = {'hostname': host,
'key_filename': os.path.expanduser('~/.ssh/cdh_testing.key'),
'username': 'ubuntu'}
@contextmanager
def hive_table(host):
name = ('temp' + str(uuid.uuid1()).replace('-', ''))[:30]
uri = 'hive://hdfs@%s:10000/default::%s' % (host, name)
try:
yield uri
finally:
with ignoring(Exception):
drop(uri)
def test_hdfs_directory_hive_creation():
with accounts_data() as (hdfs_directory, (a, b, c)):
with hive_table(host) as uri:
t = into(uri, hdfs_directory)
assert isinstance(t, sa.Table)
result = into(set, t)
assert len(result) > 0
assert discover(t) == ds
t2 = into(uri, c) # append new singleton file
assert len(into(list, t2)) > len(result)
def test_ssh_hive_creation():
with hive_table(host) as uri:
with accounts_ssh() as (_, (remote, _, _)):
t = into(uri, remote, raise_on_errors=True, **auth)
assert isinstance(t, sa.Table)
assert into(set, t) == into(set, remote, **auth)
# Load again
t2 = into(uri, remote, raise_on_errors=True, **auth)
assert isinstance(t2, sa.Table)
assert len(into(list, t2)) == 2 * len(into(list, remote, **auth))
def test_hive_creation_from_local_file():
with filetext(accounts_1_csv, extension='csv') as fn:
with hive_table(host) as uri:
t = into(uri, fn, **auth)
assert isinstance(t, sa.Table)
assert into(set, t) == into(set, fn)
t2 = into(uri, fn, **auth)
assert isinstance(t2, sa.Table)
assert len(into(list, t2)) == 2 * len(into(list, fn))
def test_ssh_directory_hive_creation():
with hive_table(host) as uri:
with accounts_ssh() as (directory, _):
t = odo(directory, uri, **auth)
assert isinstance(t, sa.Table)
assert discover(t) == ds
assert len(into(list, t)) > 0
def test_ssh_hive_creation_with_full_urls():
with hive_table(host) as uri:
with accounts_ssh() as (_, (remote, _, _)):
t = into(uri, remote,
key_filename=os.path.expanduser('~/.ssh/cdh_testing.key'))
assert isinstance(t, sa.Table)
n = len(into(list, t))
assert n > 0
# Load it again
into(t, remote,
key_filename=os.path.expanduser('~/.ssh/cdh_testing.key'))
# Doubles length
assert len(into(list, t)) == 2 * n
def test_hive_resource():
db = resource('hive://hdfs@%s:10000/default' % host)
assert isinstance(db, sa.engine.Engine)
db = resource('hive://%s/' % host)
assert isinstance(db, sa.engine.Engine)
assert str(db.url) == 'hive://hdfs@%s:10000/default' % host
def test_append_object_to_HDFS_foo():
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
with tmpfile_hdfs('json') as fn:
js = into('hdfs://%s:%s' % (host, fn), df, hdfs=hdfs)
assert (into(np.ndarray, js) == into(np.ndarray, df)).all()
def test_dialect_of():
with filetext(accounts_1_csv) as fn:
d = dialect_of(CSV(fn))
assert d['delimiter'] == ','
assert d['has_header'] is True
with accounts_data() as (directory, (a, b, c)):
directory2 = HDFS(Directory(CSV))(directory.path, hdfs=directory.hdfs)
d = dialect_of(directory2)
assert d['has_header'] is True
directory2 = HDFS(Directory(CSV))(directory.path, hdfs=directory.hdfs,
has_header=False)
d = dialect_of(directory2)
assert d['has_header'] is False
def test_hive_resource_with_internal_external():
with hive_table(host) as uri:
r = resource(uri, external=False, stored_as='PARQUET',
dshape='var * {name: string, balance: int32}')
assert isinstance(r, sa.Table)
with hive_table(host) as uri:
r = resource(uri, external=False, stored_as='PARQUET')
assert not isinstance(r, sa.Table)
with hive_table(host) as uri:
r = resource(uri, external=True, stored_as='PARQUET')
assert not isinstance(r, sa.Table)
def test_copy_hive_csv_table_to_parquet():
with hive_table(host) as csv:
with accounts_ssh() as (_, (remote, _, _)):
c = odo(remote, csv, **auth)
with hive_table(host) as parquet:
p = odo(csv, parquet, stored_as='PARQUET', external=False)
assert odo(c, list) == odo(p, list)
with hive_table(host) as parquet:
try:
fn = '/home/hdfs/%s.parquet' % str(uuid.uuid1()).replace('-', '')[:20]
p = odo(csv, parquet, stored_as='PARQUET',
external=True, path=fn)
assert odo(c, list) == odo(p, list)
finally:
hdfs.delete_file_dir(fn)
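# Editor's summary sketch (hedged; host names, users and paths below are
# placeholders). The URI forms exercised by these tests map to resources as
# follows:
#
#     resource('hdfs://user@hostname:1234:/path/to/file.json') -> HDFS(JSONLines)
#     resource('hdfs://path/to/*.csv', host=..., user=..., port=...) -> HDFS(Directory(CSV))
#     resource('hive://hdfs@hostname:10000/default') -> sqlalchemy Engine
#     'hive://hdfs@hostname:10000/default::tablename' -> a Hive table via odo/into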
| bsd-3-clause |
McIntyre-Lab/papers | fear_ase_2016/scripts/ase_summary/panel_plot.py | 1 | 2852 | #!/usr/bin/env python
import matplotlib.pyplot as plt
def plotPanelKDE(ms, dat):
""" Function to plot a panel of distributions of $\theta$. """
# mating status
if ms == 'M':
MS = 'Mated'
else:
MS = 'Virgin'
# Group by line
grp = dat.groupby('line')
# Plot all distributions and color bg by APN rank
fig, axes = plt.subplots(8, 9, figsize=(20, 20))
fig.suptitle(MS, fontsize=18)
axes = axes.ravel()
# Iterate over lines and plot each distribution.
cnt = 0
for i, val in grp:
ax = axes[cnt]
val[['q4_mean_theta', 'q5_mean_theta', 'q6_mean_theta']].plot(kind='kde', ax=ax, color=['b', 'r', 'g'], legend=False, title=i)
ax.axvline(0.5, color='k')
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
cnt += 1
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
def plotPanelSnpIndel(ms, dat):
""" Function to plot a panel of SNP/INDEL counts vs $\theta$. """
# mating status
if ms == 'M':
MS = 'Mated'
else:
MS = 'Virgin'
# Group by line
grp = dat.groupby('line')
# Plot all distributions and color bg by APN rank
fig, axes = plt.subplots(8, 9, figsize=(20, 20), sharey=True, sharex=True)
fig.suptitle(MS, fontsize=18)
axes = axes.ravel()
# Iterate over lines and plot each distribution.
cnt = 0
for i, val in grp:
ax = axes[cnt]
val.plot(kind='scatter', x='num_snps', y='q5_mean_theta', ax=ax, color='b', label='SNPs', legend=False, title=i)
val.plot(kind='scatter', x='num_indels', y='q5_mean_theta', ax=ax, color='r', marker='^', label='INDELs', legend=False)
ax.axhline(0.5, color='r')
ax.set_ylabel(r'$\theta$')
ax.set_xlabel('Number Polymorphisms')
ax.get_xaxis().set_ticks([])
ax.set_ylim(-0.2, 1.2)
ax.set_xlim(-0.2, 100)
cnt += 1
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
def plotPanelAPN(ms, dat):
""" Function to plot a panel of SNP/INDEL counts vs $\theta$. """
# mating status
if ms == 'M':
MS = 'Mated'
else:
MS = 'Virgin'
# Group by line
grp = dat.groupby('line')
# Plot all distributions and color bg by APN rank
fig, axes = plt.subplots(8, 9, figsize=(20, 20), sharey=True, sharex=True)
fig.suptitle(MS, fontsize=18)
axes = axes.ravel()
# Iterate over lines and plot each distribution.
cnt = 0
for i, val in grp:
ax = axes[cnt]
val.plot(kind='scatter', x='mean_apn', y='q5_mean_theta', ax=ax, color='b', legend=False, title=i, rot=45)
ax.axhline(0.5, color='r')
ax.set_ylabel(r'$\theta$')
ax.set_xlabel('Mean APN')
ax.set_ylim(-0.2, 1.2)
ax.set_xlim(0, 10000)
cnt += 1
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
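# Editor's usage sketch (hedged): the column names follow the ones referenced
# above; the data here is synthetic, the line labels are made up, and the KDE
# panels require scipy.
#
#     import numpy as np
#     import pandas as pd
#     toy = pd.DataFrame({'line': np.repeat(['r101', 'r105'], 50),
#                         'q4_mean_theta': np.random.rand(100),
#                         'q5_mean_theta': np.random.rand(100),
#                         'q6_mean_theta': np.random.rand(100)})
#     plotPanelKDE('M', toy)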
| lgpl-3.0 |
gwpy/gwpy | gwpy/plot/units.py | 1 | 1345 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Support for plotting with units
"""
from astropy.units.format import LatexInline
class LatexInlineDimensional(LatexInline):
"""Custom LaTeX formatter that includes physical type (if available)
Mainly for auto-labelling `Axes` in matplotlib figures
"""
name = 'latex_inline_dimensional'
@classmethod
def to_string(cls, unit):
u = '[{0}]'.format(super().to_string(unit))
if unit.physical_type not in {None, 'unknown', 'dimensionless'}:
ptype = str(unit.physical_type).split('/', 1)[0].title()
return '{0} {1}'.format(cls._latex_escape(ptype), u)
return u
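# Editor's usage sketch (hedged): the formatter can be exercised directly on an
# `astropy.units` unit, e.g. ``LatexInlineDimensional.to_string(units.m / units.s)``
# returns the inline-LaTeX unit wrapped in square brackets, prefixed with its
# physical type ('Speed ...'); the exact string depends on the astropy version.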
| gpl-3.0 |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *marker*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
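# Editor's worked examples (hedged, derived from the code above; the RGB tuple
# comes from matplotlib's colorConverter):
#
#     _process_plot_format('r--')  ->  ('--', 'None', 'r')
#     _process_plot_format('ko')   ->  ('None', 'o', 'k')
#     _process_plot_format('g')    ->  (None, None, (0.0, 0.5, 0.0))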
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
# in the que
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
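Examples (illustrative usage):
>>> ax.axis('equal')            # equal scaling via the data limits
>>> ax.axis([0, 10, -1, 1])     # explicit [xmin, xmax, ymin, ymax]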
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of it's current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore it's
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of it's current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10**-m to 10**n.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
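For example (illustrative; assumes the y-axis is using the default
ScalarFormatter):
>>> ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')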
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
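For example (illustrative; the accepted kwargs depend on the scale):
>>> ax.set_xscale('log', basex=2)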
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
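For example (illustrative; *dates* is assumed to be a sequence of
datetime objects already plotted on the x-axis):
>>> ax.plot(dates, values)
>>> ax.xaxis_date()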
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* formatted as a string. This function will use the
:attr:`fmt_xdata` attribute if it is callable, else will fall back
on the xaxis major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. The
callback functions have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
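For example (illustrative; the coordinates and text are hypothetical):
>>> ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
...             arrowprops=dict(facecolor='black', shrink=0.05))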
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
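A minimal inline usage (illustrative values):
>>> ax.hlines([0.5, 1.0], 0, 10, colors='r', linestyles='dashed')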
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError, 'xmin and y are unequal sized sequences'
if len(xmax)!=len(y):
raise ValueError, 'xmax and y are unequal sized sequences'
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid', **kwargs)
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
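For example (illustrative values):
>>> ax.vlines([1, 2, 3], 0, [1, 4, 9], colors='k')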
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError, 'ymin and x are unequal sized sequences'
if len(ymax)!=len(x):
raise ValueError, 'ymax and x are unequal sized sequences'
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markerfacecolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12)
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
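For example (an illustrative sketch using
:func:`~matplotlib.dates.drange` to build the float dates):
>>> import datetime
>>> from matplotlib import dates
>>> d = dates.drange(datetime.datetime(2008, 1, 1),
...                  datetime.datetime(2008, 2, 1),
...                  datetime.timedelta(days=1))
>>> ax.plot_date(d, range(len(d)))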
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
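        A minimal sketch using base-2 axes (the data values are purely
        illustrative)::
          x = [1, 2, 4, 8, 16]
          loglog(x, [1, 4, 16, 64, 256], basex=2, basey=2)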
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
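        A minimal sketch (data values are illustrative only)::
          semilogx([1, 10, 100, 1000], [1, 2, 3, 4])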
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
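        A minimal sketch (data values are illustrative only)::
          semilogy([1, 2, 3, 4], [1, 10, 100, 1000])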
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
        If *usevlines* is *True*, the return value is a tuple
        (*lags*, *c*, *linecol*, *b*) where
          - *linecol* is the
            :class:`~matplotlib.collections.LineCollection`
          - *b* is the horizontal line at *y* = 0 drawn with
            :meth:`~matplotlib.axes.Axes.axhline`.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
        **Example:** :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
        lag. *x* and *y* are detrended by the *detrend* callable
        (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
        - *c* is the ``2*maxlags+1`` cross correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
        With *usevlines* = *True*, the return value is a tuple
        (*lags*, *c*, *linecol*, *b*) where *linecol* is the
        :class:`matplotlib.collections.LineCollection` instance and
        *b* is the horizontal line at *y* = 0 drawn with
        :meth:`~matplotlib.axes.Axes.axhline`.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
        **Example:** :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
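        A minimal sketch; the random data below is an illustrative
        assumption, not part of the API::
          import numpy as np
          x, y = np.random.randn(2, 1000)
          lags, c, linecol, b = xcorr(x, y, usevlines=True, maxlags=50)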
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
                             'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
        If label is set to '_nolegend_', the item will not be shown in
        the legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
        If none of these locations are suitable, *loc* can be a 2-tuple
        giving x, y in axes coords, i.e.::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
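        A minimal sketch (data values are illustrative only)::
          step([1, 2, 3, 4], [1, 4, 9, 16], where='post')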
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
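        For instance, a minimal sketch (all numbers are illustrative
        assumptions)::
          bar([0, 1, 2], [3, 5, 2], width=0.8, color='g',
              yerr=[0.5, 0.4, 0.6], align='center')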
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
        # validate that the per-bar arguments match the number of bars
        if len(left) != nbars:
            raise ValueError("bar() argument 'left' must be len(%s) or scalar"
                             % nbars)
        if len(height) != nbars:
            raise ValueError("bar() argument 'height' must be len(%s) or scalar"
                             % nbars)
        if len(width) != nbars:
            raise ValueError("bar() argument 'width' must be len(%s) or scalar"
                             % nbars)
        if len(bottom) != nbars:
            raise ValueError("bar() argument 'bottom' must be len(%s) or scalar"
                             % nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
        Other optional kwargs:
%(Rectangle)s
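        For instance, a minimal sketch (numbers are illustrative only)::
          barh([0, 1, 2], [3, 5, 2], height=0.6, color='c')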
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
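        For instance, a minimal sketch (coordinates are illustrative only)::
          broken_barh([(10, 50), (100, 20)], (20, 9), facecolors='blue')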
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
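        A minimal sketch (the sample data is an illustrative assumption)::
          import numpy as np
          x = np.linspace(0.1, 2 * np.pi, 10)
          markerline, stemlines, baseline = stem(x, np.cos(x), linefmt='g-')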
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
            If it is a function, it will be called with the numeric
            percentage of the wedge as its only argument.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
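        A minimal sketch (the fractions and labels are illustrative
        assumptions)::
          figure(figsize=(6, 6))
          slices, texts = pie([15, 30, 45, 10],
                              labels=('a', 'b', 'c', 'd'),
                              explode=(0, 0.05, 0, 0), shadow=True)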
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
          *xerr*/*yerr*: [ scalar | length-N sequence | 2xN array-like ]
            If a scalar or a length-N sequence, symmetric errorbars are
            drawn at +/- the given value(s).
            If a 2xN array-like object, the first row gives the lower
            (minus) errors and the second row the upper (plus) errors.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
            the linewidth of the errorbar lines. If *None*, the
            *linewidth* (or *lw*) value from the remaining kwargs is used.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
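        A sketch of asymmetric errors using the 2xN form (all numbers
        are illustrative assumptions)::
          x = [1, 2, 3, 4]
          y = [1, 4, 9, 16]
          yerr = [[0.5, 0.4, 0.6, 0.3],   # lower errors
                  [1.0, 0.8, 1.2, 0.6]]   # upper errors
          errorbar(x, y, yerr=yerr, fmt='o', capsize=4)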
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
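        A minimal sketch (the random data is an illustrative assumption)::
          import numpy as np
          data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
          boxplot(data, notch=0, sym='r+', vert=1, whis=1.5)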
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
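        A minimal color-mapped sketch (the random data is an illustrative
        assumption)::
          import numpy as np
          x, y, c = np.random.rand(3, 50)
          scatter(x, y, s=40, c=c, marker='o')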
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
        (the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
          *yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
        :meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
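        A minimal sketch (the random data is an illustrative assumption)::
          import numpy as np
          x = np.random.standard_normal(1000)
          y = np.random.standard_normal(1000)
          hexbin(x, y, gridsize=30, bins='log')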
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
        elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
        vertices at *x*, *y* in blue::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the y data
*y2*
an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
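# Illustrative usage sketch for fill_between (added comment, not part of the
# library code); the data and styling below are hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(0, 2 * np.pi, 200)
#   y1 = np.sin(x)
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   # shade only where the curve is above the y2=0 baseline
#   ax.fill_between(x, y1, 0, where=y1 >= 0, facecolor='green', alpha=0.5)
#   plt.show()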
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, default is ``normalize()``. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
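# Illustrative usage sketch for imshow (added comment, not part of the
# library code); the array and colormap below are hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   import matplotlib.cm as cm
#   Z = np.random.rand(32, 32)          # MxN luminance array in 0-1
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   im = ax.imshow(Z, cmap=cm.gray, interpolation='nearest',
#                  vmin=0.0, vmax=1.0, origin='lower')
#   fig.colorbar(im)
#   plt.show()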
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
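# Sketch of the boundary expansion performed above (added comment, using
# hypothetical sizes): for C with shape (3, 5) and no X/Y given,
# np.meshgrid(np.arange(6), np.arange(4)) yields X, Y of shape (4, 6) -- one
# more row and column than C, so each quadrilateral C[i, j] has all four
# corners defined. 1-D inputs of length 6 and 4 are broadcast to the same
# (4, 6) shape by the reshape/repeat calls.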
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collection.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
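# Illustrative usage sketch for pcolor (added comment, not part of the
# library code); the grid below is a hypothetical example of the
# MATLAB-style orientation described in the docstring.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.arange(5)
#   y = np.arange(3)
#   X, Y = np.meshgrid(x, y)
#   C = np.random.rand(len(x), len(y))
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   ax.pcolor(X, Y, C.T)   # transpose so C[i, j] maps to (x[i], y[j])
#   plt.show()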
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolorfast(C, **kwargs)
pcolorfast(xr, yr, C, **kwargs)
pcolorfast(x, y, C, **kwargs)
pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolorfast(C, **kwargs)`` is equivalent to
``pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
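# Illustrative sketch of the three grid styles handled above (added comment,
# not part of the library code; `ax` is assumed to be an existing Axes
# instance and the data are hypothetical):
#
#   import numpy as np
#   C = np.random.rand(20, 30)
#   # style == "image":       uniform grid given only by its bounding ranges
#   ax.pcolorfast([0.0, 3.0], [0.0, 2.0], C)
#   # style == "pcolorimage": rectangular but non-uniform cell boundaries
#   ax.pcolorfast(np.logspace(0, 1, 31), np.linspace(0, 2, 21), C)
#   # style == "quadmesh":    fully general 2-D corner arrays
#   X, Y = np.meshgrid(np.linspace(0, 3, 31), np.linspace(0, 2, 21))
#   ax.pcolorfast(X + 0.1 * Y, Y, C)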
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y-axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
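# Illustrative usage sketch for twinx (added comment, not part of the
# library code); the data below are hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   t = np.linspace(0.0, 10.0, 200)
#   fig = plt.figure()
#   ax1 = fig.add_subplot(111)
#   ax1.plot(t, np.exp(-t), 'b-')               # left y-axis
#   ax2 = ax1.twinx()
#   ax2.plot(t, np.sin(2 * np.pi * t), 'r-')    # right y-axis, shared x
#   plt.show()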
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
is an integer (the number of bins), *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError('Cannot use the provided data to create a histogram')
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError('invalid histtype: %s' % histtype)
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError('invalid align: %s' % align)
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError('invalid orientation: %s' % orientation)
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError('invalid align: %s' % align)
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError('invalid histtype: %s' % histtype)
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError('invalid orientation: %s' % orientation)
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError('invalid histtype: %s' % histtype)
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
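# Illustrative usage sketch for hist (added comment, not part of the library
# code); the samples below are hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   data = 10 + 2 * np.random.randn(1000)
#   n, bins, patches = ax.hist(data, bins=50, normed=True,
#                              histtype='stepfilled', alpha=0.75)
#   print np.sum(n * np.diff(bins))   # ~1.0 for a normalized histogram
#   plt.show()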
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
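# Illustrative usage sketch for psd (added comment, not part of the library
# code); the synthetic signal below is hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   dt = 0.01
#   t = np.arange(0, 10, dt)
#   x = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(len(t))
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Pxx, freqs = ax.psd(x, NFFT=256, Fs=1.0 / dt, noverlap=128)
#   plt.show()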
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
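# Illustrative usage sketch for spy (added comment, not part of the library
# code); the matrix below is hypothetical.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   A = np.random.rand(30, 30)
#   A[A < 0.9] = 0.0                      # make the matrix mostly zero
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   ax.spy(A, precision=0, markersize=5)  # marker style, not image style
#   plt.show()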
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
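# Worked example of the layout arithmetic above (added comment, hypothetical
# numbers assuming the common default subplotpars left=0.125, right=0.9,
# bottom=0.1, top=0.9, wspace=0.2, hspace=0.2) for a 2x2 grid:
#   totWidth  = 0.9 - 0.125 = 0.775
#   figW      = 0.775 / (2 + 0.2 * 1) ~= 0.352,  sepW = 0.2 * figW ~= 0.070
#   totHeight = 0.9 - 0.1 = 0.8
#   figH      = 0.8 / (2 + 0.2 * 1) ~= 0.364,    sepH = 0.2 * figH ~= 0.073
# so subplot number 3 (num=2 -> rowNum=1, colNum=0) gets
#   figLeft   = 0.125 + 0 * (figW + sepW) = 0.125
#   figBottom = 0.9 - 2 * figH - 1 * sepH ~= 0.1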
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubclassBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| gpl-3.0 |
xuanyuanking/spark | python/pyspark/pandas/_typing.py | 9 | 1851 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import decimal
from typing import Any, Tuple, TypeVar, Union, TYPE_CHECKING
import numpy as np
from pandas.api.extensions import ExtensionDtype
if TYPE_CHECKING:
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.generic import Frame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
# TypeVars
T = TypeVar("T")
FrameLike = TypeVar("FrameLike", bound="Frame")
IndexOpsLike = TypeVar("IndexOpsLike", bound="IndexOpsMixin")
# Type aliases
Scalar = Union[
int, float, bool, str, bytes, decimal.Decimal, datetime.date, datetime.datetime, None
]
# TODO: use the actual type parameters.
Label = Tuple[Any, ...]
Name = Union[Any, Label]
Axis = Union[int, str]
Dtype = Union[np.dtype, ExtensionDtype]
DataFrameOrSeries = Union["DataFrame", "Series"]
SeriesOrIndex = Union["Series", "Index"]
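# Hypothetical usage illustration (added comment, not part of the module):
# these aliases are intended for type annotations elsewhere in the package,
# for example
#   def resolve_axis(axis: Axis) -> int: ...
#   def fill_scalar(value: Scalar) -> None: ...
# The function names above are placeholders, not actual pyspark APIs.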
| apache-2.0 |
cdusold/TensorFlowRBF | PlotTestData.py | 1 | 7172 | from matplotlib import pyplot as plt
import numpy as np
results = np.load("feedforwardtimings.npy")
#Raw Timings Plot Feedforward
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("feedforward.pgf")
plt.show()
#Relative Timings Feedforward
thread_counts = np.array([1,2,4,8,16,32*256])
results = results[0,None]/results
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)
def amdahlPortion(speedup,threads):
return threads*(speedup-1)/((threads-1)*speedup)
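# Derivation sketch (added comment): Amdahl's law gives the speedup S for a
# parallelizable fraction p on N threads as S = 1 / ((1 - p) + p / N).
# Solving for p yields p = N * (S - 1) / ((N - 1) * S), which is what
# amdahlPortion computes. For example, S = 3 on N = 4 threads implies
# p = 4 * 2 / (3 * 3) ~= 0.89.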
plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
plt.savefig("feedforward2.pgf")
plt.show()
#Backprop time
results = np.load("backproptimings.npy")
#Raw Timings Plot Backpropagation
plt.figure()
plt.suptitle("Backpropagation", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("backprop.pgf")
plt.show()
#Relative Timings Backpropagation
results = results[0,None]/results
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)
plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
plt.savefig("feedforward2.pgf")
plt.show()
| mit |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/
m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| apache-2.0 |
Vishluck/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and looking at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
mitschabaude/nanopores | scripts/numerics/force_profiles.py | 1 | 6067 | "calculate and save 1D force profiles for 2D Howorka pore for different molecule charges"
import os, numpy, dolfin
import nanopores, Howorka
import matplotlib.pyplot as plt
from collections import OrderedDict
#from nanopores import kB, T, add_params, save_dict, saveplots, showplots
#from matplotlib.pyplot import figure, plot, legend, show, title, xlabel, ylabel, savefig
nanopores.add_params(
himp = .2,
hexp = .5,
Nimp = 5e4,
Nexp = 2e4,
Nz = 50,
rMolecule = 0.5,
name = "",
save = False,
Qmols = [-3., -2., -1., 1e-4, 1.],
savefig = False,
)
a, b = -10., 10.
folder = os.path.expanduser("~") + "/papers/pnps-numerics/data/forces/"
space = (a, b, Nz)
domain = nanopores.Interval(a, b)
domain.addsubdomains(fluid = domain)
domain.addboundaries(top = domain.boundary("right"),
bottom = domain.boundary("left"))
geo = domain.create_geometry(lc=(b-a)/(Nz-1))
mesh = geo.mesh
V = dolfin.FunctionSpace(mesh, "CG", 1)
v2d = dolfin.vertex_to_dof_map(V)
coord = geo.mesh.coordinates()[:, 0] # numpy array
linspace = numpy.linspace(*space)
def function_from_values(values):
u = dolfin.Function(V)
u.vector()[v2d] = numpy.array(values)
return u
def function_from_lambda(f):
values = [f(z) for z in coord]
return function_from_values(values)
def plot_function(u, *args, **kwargs):
plt.plot(linspace, [u(z) for z in linspace], *args, **kwargs)
def plot_point(F):
plot_function(F, "-b", label="point-size")
def plot_finite(F):
plot_function(F, "s--g", label="finite-size")
def post_plot():
plt.xlabel("z-coordinate of molecule center [nm]")
plt.ylabel("force [pN]")
plt.legend(loc="best")
def plot_profile(Fimp, Fexp):
plt.figure()
plot_point(Fimp)
plot_finite(Fexp)
post_plot()
# TEST:
#plot_function(function_from_lambda(numpy.sin))
#nanopores.showplots()
#exit()
# get force from explicit molecule
def F_explicit(Qmol):
values = []
for z0 in coord:
geo, phys = Howorka.setup2D(z0=z0, h=hexp, Qmol=Qmol, rMolecule=rMolecule)
dolfin.plot(geo.boundaries, key="b", title="boundaries")
pb, pnps = Howorka.solve2D(geo, phys, Nmax=Nexp, cheapest=True)
dolfin.plot(geo.boundaries, key="b", title="boundaries")
values.append(pnps.zforces())
F, Fel, Fdrag = tuple(function_from_values(v) for v in zip(*values))
return F, Fel, Fdrag
# get force from implicit molecule
def F_implicit(Qmol):
geo, phys = Howorka.setup2D(z0=None, h=himp, Qmol=Qmol, rMolecule=rMolecule)
pb, pnps = Howorka.solve2D(geo, phys, Nmax=Nimp, cheapest=True)
values = [pnps.zforces_implicit(z) for z in coord]
F, Fel, Fdrag = tuple(function_from_values(v) for v in zip(*values))
#pnps.visualize()
return F, Fel, Fdrag
def saveforces(name, F, Fel, Fdrag):
#dolfin.File(name + "_mesh.xml") << geo.mesh
dolfin.File(folder+name + "_F.xml") << F
dolfin.File(folder+name + "_Fel.xml") << Fel
dolfin.File(folder+name + "_Fdrag.xml") << Fdrag
def loadforces(name):
#mesh = Mesh(name + "_mesh.xml")
#V = FunctionSpace(mesh, "CG", 1)
F = dolfin.Function(V, folder+name + "_F.xml")
Fel = dolfin.Function(V, folder+name + "_Fel.xml")
Fdrag = dolfin.Function(V, folder+name + "_Fdrag.xml")
return F, Fel, Fdrag
def saveall(name, Qmol):
name = name + "_Q%.2f" % Qmol
Fi, Feli, Fdragi = F_implicit(Qmol=Qmol)
saveforces(name + "_imp", Fi, Feli, Fdragi)
F, Fel, Fdrag = F_explicit(Qmol=Qmol)
saveforces(name + "_exp", F, Fel, Fdrag)
def loadall(name, Qmol):
name = name + "_Q%.2f" % Qmol
imp = loadforces(name + "_imp")
exp = loadforces(name + "_exp")
return imp, exp
if save:
for Q in Qmols:
saveall(name, Q)
exit()
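# construct_alpha builds a piecewise-linear blending profile: alpha(z) equals
# a0 for |z| < a, 1 for |z| > b, and interpolates linearly in between, so the
# correction is only applied around the pore region (a, b hard-coded below).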
def construct_alpha(a0):
lpore = 4.5 # TODO
a = 4.
b = 6.
def alpha(z):
if abs(z) > b:
return 1.
elif abs(z) < a:
return a0
else: # abs(z) in [a,b]
return 1. + (b-abs(z))/(b-a)*(a0 - 1)
return function_from_lambda(alpha)
def Forces(name):
for Q in Qmols:
(Fi, Feli, Fdragi), (F, Fel, Fdrag) = loadall(name, Q)
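# rescale the implicit drag and electric forces so they match the explicit
# (finite-size) ones at the pore center z=0, then blend the correction in
# and out of the pore with construct_alpha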
alpha0 = Fdrag(0.0)/Fdragi(0.0)
alpha = construct_alpha(alpha0)
beta0 = Fel(0.0)/Feli(0.0)
beta = construct_alpha(beta0)
Fdragi_better = function_from_lambda(lambda z : Fdragi(z)*alpha(z))
Feli_better = function_from_lambda(lambda z : Feli(z)*beta(z))
Fi_better = function_from_lambda(lambda z : Feli_better(z) + Fdragi_better(z))
yield OrderedDict([("F",F), ("Fi",Fi), ("Fi2",Fi_better),
("alpha",alpha), ("beta",beta), ("Q",Q)])
if __name__ == "__main__":
(Fi, Feli, Fdragi), (F, Fel, Fdrag) = loadall(name, -1.)
plt.figure("Fel", figsize=(5,4))
plot_finite(Fel)
plot_point(Feli)
post_plot()
plt.figure("Fdrag", figsize=(5,4))
plot_finite(Fdrag)
plot_point(Fdragi)
post_plot()
plt.figure("F", figsize=(5,4))
plot_finite(F)
plot_point(Fi)
post_plot()
for odict in Forces(name):
F, Fi, Fi_better, alpha, beta, Q = tuple(odict.values())
print "Q %s, alpha %s" % (Q, alpha(0.))
plt.figure("alpha", figsize=(5,4))
plot_function(alpha, label="Q = %.0fq"%Q)
#plt.figure("beta", figsize=(5,4))
#plot_function(beta, label="Q = %.0f"%Q)
plt.figure("_hybrid_Q"+str(int(Q)), figsize=(5,4))
plot_finite(F)
plot_point(Fi)
plot_function(Fi_better, "-r", label="hybrid-size")
post_plot()
plt.figure("alpha")
#plt.title("r = %s" %rMol)
plt.xlabel("z coordinate of molecule center [nm]")
plt.ylabel("relative drag force correction")
plt.legend(loc="best")
# save figs
DIR = os.path.expanduser("~") + "/papers/pnps-numerics/figure_material/PMF/"
if savefig:
nanopores.savefigs("force_profiles", DIR)
#nanopores.showplots()
| mit |
m0re4u/LeRoT-SCLP | visual_eval/create_histogram.py | 1 | 3474 | from time import sleep
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import argparse
import yaml
import sys
SEAGREEN = (0, 128 / 255., 102 / 255.)
INTERVAL = 500
MINIMUM = 0.03
def create_histogram(filename, iterations, stepsize, x_label, y_label, max_x,
min_y, max_y):
fig = plt.figure(facecolor='white')
datafile = open(filename, "r").read()
y_data = [sorted(data, reverse=True) for data in yaml.load(datafile)]
y_data = [y_data[i*stepsize] for i in range(0, len(y_data) // stepsize)]
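# clamp very small values to MINIMUM so every bar stays visible in the plot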
y_data = [[max(MINIMUM, d) for d in data] for data in y_data]
x_data = list(range(0, len(y_data[0])))
fargs = [
x_data,
y_data,
fig,
min_y,
max_y,
x_label,
y_label,
iterations
]
# keep a reference to the animation; matplotlib's FuncAnimation is stopped
# by the garbage collector if its return value is discarded
anim = animation.FuncAnimation(
fig, animate,
fargs=fargs,
interval=INTERVAL
)
plt.show()
def animate(i, x_data, y_data, fig, min_y, max_y, x_label, y_label,
iterations):
if i > iterations:
sleep(3)
sys.exit()
if i == 1:
sleep(5)
# Reset figure
fig.clear()
# Set text for figure and labels
plt.xlabel(x_label, size=26)
plt.ylabel(y_label, size=26)
plt.title("Iteration" + " " + str(i+1), size=26)
# Set axes sizes
max_x = len(x_data)
plt.ylim(min_y, max_y)
plt.xlim(0, max_x)
plt.bar(x_data, y_data[i], color=SEAGREEN)
# Set visibility of plot frame lines
ax = plt.axes()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(True)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(True)
# Add major grid lines
ax.grid(
which='major',
axis='y',
linestyle='--',
linewidth=0.5,
color='black',
alpha=0.5
)
# Remove the tick marks at top and right
plt.tick_params(axis="both", which="both", bottom="on", top="off",
labelbottom="on", left="on", right="off",
labelleft="on")
return plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""
Create an animated histogram from a YAML data file, showing how the
sorted per-iteration values evolve over the requested number of
iterations.""")
parser.add_argument("-f", "--filename", help="name of file with data")
parser.add_argument("-s", "--stepsize", help="stepsize for the animation",
type=int)
parser.add_argument("-i", "--iterations", type=int,
help="number of iterations shown")
parser.add_argument("-x", "--x_label", help="label for x-axis")
parser.add_argument("-y", "--y_label", help="label for y-axis"
"(HAS TO BE EVALUATION MEASURE LIKE IN CONFIG)")
parser.add_argument("-max_x", "--max_bound_x",
help="maximum number for x-axis", type=int)
parser.add_argument("-max_y", "--max_bound_y",
help="maximum number for y-axis", type=float)
parser.add_argument("-min_y", "--min_bound_y",
help="minimum number for y-axis", type=float)
args = parser.parse_args()
create_histogram(args.filename, args.iterations, args.stepsize,
args.x_label, args.y_label, args.max_bound_x,
args.min_bound_y, args.max_bound_y)
| gpl-3.0 |
ky822/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
fbagirov/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
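# two rounds of 2x2 block summing reduce the 512x512 image to 128x128;
# dividing by 16 afterwards turns the sums into block averages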
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| gpl-2.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/IPython/core/pylabtools.py | 4 | 13260 | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities.
Authors
-------
* Fernando Perez.
* Brian Granger
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'inline' : 'module://IPython.kernel.zmq.pylab.backend_inline'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
    but the function continues processing the remaining figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
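# Illustrative sketch (not part of the original IPython source): how getfigs
# might be used from an interactive session.  The figure numbers below are
# assumptions made for this example only.
def _example_getfigs_usage():
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot([0, 1])
    plt.figure(2)
    plt.plot([1, 0])
    both = getfigs(1, 2)   # the two Figure objects, in the requested order
    every = getfigs()      # no arguments: every open figure
    return both, every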
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = rcParams['savefig.dpi']
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
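# Illustrative sketch (not part of the original IPython source): print_figure
# returns the raw image data (unicode for SVG, bytes otherwise), and
# retina_figure reports half the PNG's pixel size so the front-end displays
# the pixel-doubled image at its intended size.
def _example_print_figure_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    fig.gca().plot([1, 2, 3])
    png_bytes = print_figure(fig, fmt='png')      # bytes
    svg_text = print_figure(fig, fmt='svg')       # unicode
    png2x, meta = retina_figure(fig)              # meta holds halved width/height
    return len(png_bytes), len(svg_text), meta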
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pylab as pylab
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if pylab.draw_if_interactive.called:
pylab.draw()
pylab.draw_if_interactive.called = False
return mpl_execfile
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
from matplotlib.figure import Figure
from IPython.kernel.zmq.pylab import backend_inline
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.kernel.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from IPython.kernel.zmq.pylab.backend_inline import InlineBackend
except ImportError:
return
from matplotlib import pyplot
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = pyplot.rcParams[k]
# load inline_rc
pyplot.rcParams.update(cfg.rc)
else:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
pyplot.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
| gpl-2.0 |
cxxgtxy/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 61 | 3350 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using recurrent neural networks over characters on the DBpedia dataset to predict the class of an entity from its description.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is a rough alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
"""Character level recurrent neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.one_hot(features, 256, 1, 0)
byte_list = tf.unstack(byte_list, axis=1)
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
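# Illustrative numpy sketch (not part of the original example): each byte of a
# document is one-hot encoded into a 256-vector, so a batch of shape
# [batch_size, MAX_DOCUMENT_LENGTH] becomes
# [batch_size, MAX_DOCUMENT_LENGTH, 256] and is then unstacked into
# MAX_DOCUMENT_LENGTH time steps for the GRU.  The batch size used here is an
# assumption for the example only.
def _example_byte_one_hot_shapes(batch_size=4):
  features = np.random.randint(0, 256, size=(batch_size, MAX_DOCUMENT_LENGTH))
  one_hot = np.eye(256)[features]  # shape: (batch_size, MAX_DOCUMENT_LENGTH, 256)
  steps = [one_hot[:, i, :] for i in range(MAX_DOCUMENT_LENGTH)]
  return one_hot.shape, len(steps)  # ((4, 100, 256), 100) with the defaults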
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
icdishb/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to each other. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
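# Illustrative sketch (not part of the original example): the cosine distance
# is invariant to a positive rescaling of a waveform, while the euclidean and
# cityblock distances are not.  The toy vectors below are assumptions made for
# this demonstration only.
def _cosine_scaling_invariance_demo():
    w = np.random.RandomState(0).rand(1, 50)
    scaled = 3.0 * w  # proportional "waveform", like waveform 1 vs waveform 2
    d_cos = pairwise_distances(w, scaled, metric="cosine")[0, 0]      # ~0
    d_euc = pairwise_distances(w, scaled, metric="euclidean")[0, 0]   # > 0
    d_city = pairwise_distances(w, scaled, metric="cityblock")[0, 0]  # > 0
    return d_cos, d_euc, d_city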
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
sequana/sequana | doc/sphinxext/sphinx_gallery/gen_rst.py | 2 | 20235 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
==================
RST file generator
==================
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function, absolute_import
from time import time
import ast
import hashlib
import os
import re
import shutil
import subprocess
import sys
import traceback
import warnings
# Try Python 2 first, otherwise load from Python 3
from textwrap import dedent
try:
# textwrap indent only exists in python 3
from textwrap import indent
except ImportError:
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
# this script can be imported by nosetest to find tests to run: we should
# not impose the matplotlib requirement in that case.
pass
from . import glr_path_static
from .backreferences import write_backreferences, _thumbnail_div
from .notebook import Notebook
try:
basestring
except NameError:
basestring = str
###############################################################################
class Tee(object):
"""A tee object to redirect streams to multiple outputs"""
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
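# Illustrative sketch (not part of the original sphinx-gallery source):
# anything written to a Tee is duplicated to both underlying streams, which is
# how the example output is captured below while still being echoed to the
# console.
def _example_tee_usage():
    first, second = StringIO(), StringIO()
    tee = Tee(first, second)
    tee.write('captured\n')
    tee.flush()
    return first.getvalue(), second.getvalue()  # both equal 'captured\n'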
###############################################################################
CODE_DOWNLOAD = """**Total running time of the script:**
({0:.0f} minutes {1:.3f} seconds)\n\n
\n.. container:: sphx-glr-download
**Download Python source code:** :download:`{2} <{2}>`\n
\n.. container:: sphx-glr-download
**Download IPython notebook:** :download:`{3} <{3}>`\n"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: sphx-glr-horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: /%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: /%s
:align: center
"""
CODE_OUTPUT = """.. rst-class:: sphx-glr-script-out
Out::
{0}\n"""
def get_docstring_and_rest(filename):
"""Separate `filename` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Returns
-------
docstring: str
docstring of `filename`
rest: str
`filename` content without the docstring
"""
with open(filename) as f:
content = f.read()
node = ast.parse(content)
if not isinstance(node, ast.Module):
raise TypeError("This function only supports modules. "
"You provided {0}".format(node.__class__.__name__))
if node.body and isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Str):
docstring_node = node.body[0]
docstring = docstring_node.value.s
# This get the content of the file after the docstring last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = content.split('\n', docstring_node.lineno)[-1]
return docstring, rest
else:
raise ValueError(('Could not find docstring in file "{0}". '
'A docstring is required by sphinx-gallery')
.format(filename))
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, content)
List where each element is a tuple with the label ('text' or 'code'),
and content string of block.
"""
docstring, rest_of_content = get_docstring_and_rest(source_file)
blocks = [('text', docstring)]
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
match_start_pos, match_end_pos = match.span()
code_block_content = rest_of_content[pos_so_far:match_start_pos]
text_content = match.group('text_content')
sub_pat = re.compile('^#', flags=re.M)
text_block_content = dedent(re.sub(sub_pat, '', text_content))
if code_block_content.strip():
blocks.append(('code', code_block_content))
if text_block_content.strip():
blocks.append(('text', text_block_content))
pos_so_far = match_end_pos
remaining_content = rest_of_content[pos_so_far:]
if remaining_content.strip():
blocks.append(('code', remaining_content))
return blocks
def codestr2rst(codestr, lang='python'):
"""Return reStructuredText code block from code string"""
code_directive = "\n.. code-block:: {0}\n\n".format(lang)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block
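# Illustrative sketch (not part of the original sphinx-gallery source): the
# returned string embeds the code in a ``.. code-block::`` directive with a
# four-space indent, ready to be concatenated into the generated rst file.
def _example_codestr2rst():
    rst = codestr2rst("print('hi')")
    # rst == "\n.. code-block:: python\n\n    print('hi')"
    return rst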
def text2string(content):
"""Returns a string without the extra triple quotes"""
try:
return ast.literal_eval(content) + '\n'
except Exception:
return content
def extract_intro(filename):
""" Extract the first paragraph of module-level docstring. max:95 char"""
docstring, _ = get_docstring_and_rest(filename)
# lstrip is just in case docstring has a '\n\n' at the beginning
paragraphs = docstring.lstrip().split('\n\n')
if len(paragraphs) > 1:
first_paragraph = re.sub('\n', ' ', paragraphs[1])
first_paragraph = (first_paragraph[:95] + '...'
if len(first_paragraph) > 95 else first_paragraph)
else:
raise ValueError(
"Example docstring should have a header for the example title "
"and at least a paragraph explaining what the example is about. "
"Please check the example file:\n {}\n".format(filename))
return first_paragraph
def get_md5sum(src_file):
"""Returns md5sum of file"""
with open(src_file, 'r') as src_data:
src_content = src_data.read()
# data needs to be encoded in python3 before hashing
if sys.version_info[0] == 3:
src_content = src_content.encode('utf-8')
src_md5 = hashlib.md5(src_content).hexdigest()
return src_md5
def check_md5sum_change(src_file):
"""Returns True if src_file has a different md5sum"""
src_md5 = get_md5sum(src_file)
src_md5_file = src_file + '.md5'
src_file_changed = True
if os.path.exists(src_md5_file):
with open(src_md5_file, 'r') as file_checksum:
ref_md5 = file_checksum.read()
if src_md5 == ref_md5:
src_file_changed = False
if src_file_changed:
with open(src_md5_file, 'w') as file_checksum:
file_checksum.write(src_md5)
return src_file_changed
def _plots_are_current(src_file, image_file):
"""Test existence of image file and no change in md5sum of
example"""
first_image_file = image_file.format(1)
has_image = os.path.exists(first_image_file)
src_file_changed = check_md5sum_change(src_file)
return has_image and not src_file_changed
def save_figures(image_path, fig_count, gallery_conf):
"""Save all open matplotlib figures of the example code-block
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number)
fig_count : int
Previous figure number count. Figure number add from this number
Returns
-------
list of strings containing the full path to each figure
"""
figure_list = []
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
current_fig = image_path.format(fig_count + fig_mngr.num)
fig.savefig(current_fig, **kwargs)
figure_list.append(current_fig)
if gallery_conf.get('find_mayavi_figures', False):
from mayavi import mlab
e = mlab.get_engine()
last_matplotlib_fig_num = len(figure_list)
total_fig_num = last_matplotlib_fig_num + len(e.scenes)
mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)
for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
current_fig = image_path.format(mayavi_fig_num)
mlab.savefig(current_fig, figure=scene)
# make sure the image is not too large
scale_image(current_fig, current_fig, 850, 999)
figure_list.append(current_fig)
mlab.close(all=True)
return figure_list
def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the \
generated images')
def save_thumbnail(image_path, base_image_name, gallery_conf):
"""Save the thumbnail image"""
first_image_file = image_path.format(1)
thumb_dir = os.path.join(os.path.dirname(first_image_file), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.png' % base_image_name)
if os.path.exists(first_image_file):
scale_image(first_image_file, thumb_file, 400, 280)
elif not os.path.exists(thumb_file):
# create something to replace the thumbnail
default_thumb_file = os.path.join(glr_path_static(), 'no_image.png')
default_thumb_file = gallery_conf.get("default_thumb_file",
default_thumb_file)
scale_image(default_thumb_file, thumb_file, 200, 140)
def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
"""Generate the gallery reStructuredText for an example directory"""
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print(80 * '_')
print('Example directory %s does not have a README.txt file' %
src_dir)
print('Skipping this directory')
print(80 * '_')
return "" # because string is an expected return type
fhindex = open(os.path.join(src_dir, 'README.txt')).read()
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))
if fname.endswith('.py')]
entries_text = []
for fname in sorted_listdir:
amount_of_code = generate_file_rst(fname, target_dir, src_dir,
gallery_conf)
new_fname = os.path.join(src_dir, fname)
intro = extract_intro(new_fname)
write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, intro)
this_entry = _thumbnail_div(target_dir, fname, intro) + """
.. toctree::
:hidden:
/%s/%s\n""" % (target_dir, fname[:-3])
entries_text.append((amount_of_code, this_entry))
# sort to have the smallest entries in the beginning
entries_text.sort()
for _, entry_text in entries_text:
fhindex += entry_text
# clear at the end of the section
fhindex += """.. raw:: html\n
<div style='clear:both'></div>\n\n"""
return fhindex
def execute_script(code_block, example_globals, image_path, fig_count,
src_file, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
# We need to execute the code
print('plotting code blocks in %s' % src_file)
plt.close('all')
cwd = os.getcwd()
    # Redirect output to stdout and capture it in a buffer
orig_stdout = sys.stdout
try:
# First cd in the original example dir, so that any file
        # created by the example gets created in this directory
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4))
os.chdir(cwd)
figure_list = save_figures(image_path, fig_count, gallery_conf)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
image_list = ""
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
elif len(figure_list) > 1:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
except Exception:
formatted_exception = traceback.format_exc()
print(80 * '_')
print('%s is not compiling:' % src_file)
print(formatted_exception)
print(80 * '_')
figure_list = []
image_list = codestr2rst(formatted_exception, lang='pytb')
# Overrides the output thumbnail in the gallery for easy identification
broken_img = os.path.join(glr_path_static(), 'broken_example.png')
shutil.copyfile(broken_img, os.path.join(cwd, image_path.format(1)))
fig_count += 1 # raise count to avoid overwriting image
# Breaks build on first example error
if gallery_conf['abort_on_example_error']:
raise
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout)
return code_output, time_elapsed, fig_count + len(figure_list)
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
""" Generate the rst file for a given example.
    Returns the amount of code (in characters) of the corresponding
files.
"""
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
image_dir = os.path.join(target_dir, 'images')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
base_image_name = os.path.splitext(fname)[0]
image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
image_path = os.path.join(image_dir, image_fname)
script_blocks = split_code_and_text_blocks(example_file)
amount_of_code = sum([len(bcontent)
for blabel, bcontent in script_blocks
if blabel == 'code'])
if _plots_are_current(example_file, image_path):
return amount_of_code
time_elapsed = 0
ref_fname = example_file.replace(os.path.sep, '_')
example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
example_nb = Notebook(fname, target_dir)
filename_pattern = gallery_conf.get('filename_pattern')
if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']:
        # A lot of examples contain 'print(__doc__)', for example in
# scikit-learn so that running the example prints some useful
# information. Because the docstring has been separated from
# the code blocks in sphinx-gallery, __doc__ is actually
# __builtin__.__doc__ in the execution context and we do not
# want to print it
example_globals = {'__doc__': ''}
fig_count = 0
# A simple example has two blocks: one for the
# example introduction/explanation and one for the code
is_example_notebook_like = len(script_blocks) > 2
for blabel, bcontent in script_blocks:
if blabel == 'code':
code_output, rtime, fig_count = execute_script(bcontent,
example_globals,
image_path,
fig_count,
src_file,
gallery_conf)
time_elapsed += rtime
example_nb.add_code_cell(bcontent)
if is_example_notebook_like:
example_rst += codestr2rst(bcontent) + '\n'
example_rst += code_output
else:
example_rst += code_output
example_rst += codestr2rst(bcontent) + '\n'
else:
example_rst += text2string(bcontent) + '\n'
example_nb.add_markdown_cell(text2string(bcontent))
else:
for blabel, bcontent in script_blocks:
if blabel == 'code':
example_rst += codestr2rst(bcontent) + '\n'
example_nb.add_code_cell(bcontent)
else:
example_rst += bcontent + '\n'
example_nb.add_markdown_cell(text2string(bcontent))
save_thumbnail(image_path, base_image_name, gallery_conf)
time_m, time_s = divmod(time_elapsed, 60)
example_nb.save_file()
with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f:
example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname,
example_nb.file_name)
f.write(example_rst)
return amount_of_code
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/core/dtypes/missing.py | 3 | 11778 | """
missing types & inference
"""
import numpy as np
from pandas._libs import lib
from pandas._libs.tslib import NaT, iNaT
from .generic import (ABCMultiIndex, ABCSeries,
ABCIndexClass, ABCGeneric)
from .common import (is_string_dtype, is_datetimelike,
is_datetimelike_v_numeric, is_float_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_complex_dtype, is_categorical_dtype,
is_string_like_dtype, is_bool_dtype,
is_integer_dtype, is_dtype_equal,
needs_i8_conversion, _ensure_object,
pandas_dtype,
is_scalar,
is_object_dtype,
is_integer,
_TD_DTYPE,
_NS_DTYPE)
from .inference import is_list_like
def isna(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
    obj : ndarray or object value
Object to check for null-ness
Returns
-------
isna : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notna: boolean inverse of pandas.isna
pandas.isnull: alias of isna
"""
return _isna(obj)
isnull = isna
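# Illustrative sketch (not part of the original pandas source): isna/notna on
# a small object array; both None and np.nan are reported as missing.
def _example_isna_usage():
    arr = np.array(['a', None, np.nan, 'b'], dtype=object)
    missing = isna(arr)   # array([False,  True,  True, False])
    present = notna(arr)  # array([ True, False, False,  True])
    return missing, present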
def _isna_new(obj):
if is_scalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
    obj: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isna = _isna_new
def _use_inf_as_na(key):
"""Option change callback for na/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
from pandas.core.config import get_option
flag = get_option(key)
if flag:
globals()['_isna'] = _isna_old
else:
globals()['_isna'] = _isna_new
def _isna_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
if is_categorical_dtype(values):
from pandas import Categorical
if not isinstance(values, Categorical):
values = values.values
result = values.isna()
elif is_interval_dtype(values):
from pandas import IntervalIndex
result = IntervalIndex(obj).isna()
else:
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(obj):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isna_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnaobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif is_datetime64_dtype(dtype):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notna(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
    obj : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
notisna : array-like of bool or bool
Array or bool indicating whether an object is *not* null or if an array
is given which of the element is *not* null.
See also
--------
pandas.isna : boolean inverse of pandas.notna
pandas.notnull : alias of notna
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is NaT or other is None:
return True
elif is_scalar(other):
# a timedelta
if hasattr(other, 'dtype'):
return other.view('i8') == iNaT
elif is_integer(other) and other == iNaT:
return True
return isna(other)
return False
def _isna_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isna(fill_value):
return not (is_bool_dtype(dtype) or
is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
_ensure_object(left.ravel()), _ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if (not isinstance(right_value, float) or
not np.isnan(right_value)):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
return ((left == right) | (isna(left) & isna(right))).all()
    # numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view('i8')
right = right.view('i8')
# NaNs cannot occur otherwise.
try:
return np.array_equal(left, right)
except AttributeError:
# see gh-13388
#
# NumPy v1.7.1 has a bug in its array_equal
# function that prevents it from correctly
# comparing two arrays with complex dtypes.
# This bug is corrected in v1.8.0, so remove
# this try-except block as soon as we stop
# supporting NumPy versions < 1.8.0
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.tolist()
right = right.tolist()
return left == right
def _infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
    scalar/ndarray/list-like. If we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(_ensure_object(val))
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
"""
    if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
Returns
-------
np.dtype or a pandas dtype
"""
dtype = pandas_dtype(dtype)
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
return 0
elif is_bool_dtype(dtype):
return False
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
return arr[notna(lib.values_from_object(arr))]
| gpl-2.0 |
dewtx29/python_ann | python/pydev_ann/LearnNN/learnNN5.py | 1 | 3575 | from matplotlib import pyplot
from math import cos, sin, atan
import numpy as np
class Neuron():
def __init__(self, x, y):
self.x = x
self.y = y
def draw(self):
circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
pyplot.gca().add_patch(circle)
class Layer():
def __init__(self, network, number_of_neurons, weights):
self.previous_layer = self.__get_previous_layer(network)
self.y = self.__calculate_layer_y_position()
self.neurons = self.__intialise_neurons(number_of_neurons)
self.weights = weights
def __intialise_neurons(self, number_of_neurons):
neurons = []
x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
for iteration in range(number_of_neurons):
neuron = Neuron(x, self.y)
neurons.append(neuron)
x += horizontal_distance_between_neurons
return neurons
def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
return horizontal_distance_between_neurons * (number_of_neurons_in_widest_layer - number_of_neurons) / 2
def __calculate_layer_y_position(self):
if self.previous_layer:
return self.previous_layer.y + vertical_distance_between_layers
else:
return 0
def __get_previous_layer(self, network):
if len(network.layers) > 0:
return network.layers[-1]
else:
return None
def __line_between_two_neurons(self, neuron1, neuron2, linewidth):
angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
x_adjustment = neuron_radius * sin(angle)
y_adjustment = neuron_radius * cos(angle)
line_x_data = (neuron1.x - x_adjustment, neuron2.x + x_adjustment)
line_y_data = (neuron1.y - y_adjustment, neuron2.y + y_adjustment)
line = pyplot.Line2D(line_x_data, line_y_data, linewidth=linewidth)
pyplot.gca().add_line(line)
def draw(self):
for this_layer_neuron_index in range(len(self.neurons)):
neuron = self.neurons[this_layer_neuron_index]
neuron.draw()
if self.previous_layer:
for previous_layer_neuron_index in range(len(self.previous_layer.neurons)):
previous_layer_neuron = self.previous_layer.neurons[previous_layer_neuron_index]
weight = self.previous_layer.weights[this_layer_neuron_index, previous_layer_neuron_index]
self.__line_between_two_neurons(neuron, previous_layer_neuron, weight)
class NeuralNetwork():
def __init__(self):
self.layers = []
def add_layer(self, number_of_neurons, weights=None):
layer = Layer(self, number_of_neurons, weights)
self.layers.append(layer)
def draw(self):
for layer in self.layers:
layer.draw()
pyplot.axis('scaled')
pyplot.show()
if __name__ == "__main__":
vertical_distance_between_layers = 6
horizontal_distance_between_neurons = 2
neuron_radius = 0.5
number_of_neurons_in_widest_layer = 4
network = NeuralNetwork()
# weights to convert from 10 outputs to 4 (decimal digits to their binary representation)
weights1 = np.array([\
[0,0,0,0,0,0,0,0,1,1],\
[0,0,0,0,1,1,1,1,0,0],\
[0,0,1,1,0,0,1,1,0,0],\
[0,1,0,1,0,1,0,1,0,1]])
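    # Illustrative check (not part of the original script, safe to remove):
    # column d of weights1 is the 4-bit binary representation of digit d,
    # most significant bit first (e.g. digit 9 -> [1, 0, 0, 1]).
    assert all(list(weights1[:, d]) == [int(b) for b in format(d, '04b')]
               for d in range(10))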
network.add_layer(10, weights1)
network.add_layer(4)
network.draw() | gpl-3.0 |
ArtsiomCh/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 13 | 19945 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
      sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
adykstra/mne-python | mne/utils/config.py | 1 | 18971 | # -*- coding: utf-8 -*-
"""The config functions."""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import atexit
from functools import partial
import inspect
from io import StringIO
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import numpy as np
from .check import _validate_type
from ._logging import warn, logger
_temp_home_dir = None
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir: str or None
Directory to use for temporary file storage. None disables
temporary file storage.
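    Examples
    --------
    A minimal sketch; the cache path below is purely illustrative, so the call
    is skipped under doctest::
        >>> set_cache_dir('/tmp/mne_cache')  # doctest: +SKIP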
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing.
Parameters
----------
memmap_min_size: str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, str):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
known_config_types = (
'MNE_BROWSE_RAW_SIZE',
'MNE_CACHE_DIR',
'MNE_COREG_ADVANCED_RENDERING',
'MNE_COREG_COPY_ANNOT',
'MNE_COREG_GUESS_MRI_SUBJECT',
'MNE_COREG_HEAD_HIGH_RES',
'MNE_COREG_HEAD_OPACITY',
'MNE_COREG_INTERACTION',
'MNE_COREG_MARK_INSIDE',
'MNE_COREG_PREPARE_BEM',
'MNE_COREG_PROJECT_EEG',
'MNE_COREG_ORIENT_TO_SURFACE',
'MNE_COREG_SCALE_LABELS',
'MNE_COREG_SCALE_BY_DISTANCE',
'MNE_COREG_SCENE_SCALE',
'MNE_COREG_WINDOW_HEIGHT',
'MNE_COREG_WINDOW_WIDTH',
'MNE_COREG_SUBJECTS_DIR',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_HF_SEF_PATH',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_MISC_PATH',
'MNE_DATASETS_MTRF_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_MULTIMODAL_PATH',
'MNE_DATASETS_OPM_PATH',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'MNE_DATASETS_KILOWORD_PATH',
'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'MNE_FORCE_SERIAL',
'MNE_KIT2FIFF_STIM_CHANNELS',
'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
'MNE_LOGGING_LEVEL',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_FTP_TESTS',
'MNE_SKIP_NETWORK_TESTS',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_STIM_CHANNEL',
'MNE_USE_CUDA',
'MNE_SKIP_FS_FLASH_CALL',
'SUBJECTS_DIR',
)
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
"""Safely load a config file."""
with open(config_path, 'r') as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = ('The MNE-Python config file (%s) is not a valid JSON '
'file and might be corrupted' % config_path)
if raise_error:
raise RuntimeError(msg)
warn(msg)
config = dict()
return config
def get_config_path(home_dir=None):
r"""Get path to standard mne-python config file.
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%USERPROFILE%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def get_config(key=None, default=None, raise_error=False, home_dir=None,
use_env=True):
"""Read MNE-Python preferences from environment or config file.
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in environment variables or
the path are returned. If key is an empty string, a list of all valid
keys (but not values) is returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
use_env : bool
If True, consider env vars, if available.
If False, only use MNE-Python configuration file values.
.. versionadded:: 0.18
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
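    Examples
    --------
    A minimal round-trip sketch; the key/value pair is illustrative and the
    calls are skipped under doctest because they touch the config file::
        >>> set_config('MNE_LOGGING_LEVEL', 'INFO')  # doctest: +SKIP
        >>> get_config('MNE_LOGGING_LEVEL')  # doctest: +SKIP
        'INFO'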
"""
_validate_type(key, (str, type(None)), "key", 'string or None')
if key == '':
return known_config_types
# first, check to see if key is in env
if use_env and key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
config = {}
else:
config = _load_config(config_path)
if key is None:
# update config with environment variables
if use_env:
env_keys = (set(config).union(known_config_types).
intersection(os.environ))
config.update({key: os.environ[key] for key in env_keys})
return config
elif raise_error is True and key not in config:
loc_env = 'the environment or in the ' if use_env else ''
meth_env = ('either os.environ["%s"] = VALUE for a temporary '
'solution, or ' % key) if use_env else ''
extra_env = (' You can also set the environment variable before '
'running python.' if use_env else '')
meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
'for a permanent one' % key)
raise KeyError('Key "%s" not found in %s'
'the mne-python config file (%s). '
'Try %s%s.%s'
% (key, loc_env, config_path, meth_env, meth_file,
extra_env))
else:
return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
"""Set a MNE-Python preference key in the config file and environment.
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
set_env : bool
If True (default), update :data:`os.environ` in addition to
updating the MNE-Python config file.
See Also
--------
get_config
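    Examples
    --------
    A brief sketch; the key is illustrative and the calls are skipped under
    doctest because they write to the config file. Passing ``None`` removes a
    previously stored key::
        >>> set_config('MNE_USE_CUDA', 'false')  # doctest: +SKIP
        >>> set_config('MNE_USE_CUDA', None)  # doctest: +SKIP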
"""
if key is None:
warn('set_config(key=None, value=None) to get a list of valid keys '
'has been deprecated and will be removed in version 0.19. Use '
             "get_config(key='') instead.", DeprecationWarning)
return known_config_types
_validate_type(key, 'str', "key")
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
_validate_type(value, (str, type(None)), "value",
"None or string")
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
config = _load_config(config_path, raise_error=True)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
if set_env and key in os.environ:
del os.environ[key]
else:
config[key] = value
if set_env:
os.environ[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)."""
global _temp_home_dir
if home_dir is None:
home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
home_dir = os.getenv('APPDATA')
else:
home_dir = os.getenv('USERPROFILE')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR.
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
    are read. If these are not found, it will fall back to 'STI101' (newer
    systems) or 'STI 014' (older systems) if present, then fall back to the
    first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
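    Examples
    --------
    A hedged sketch; ``info`` would normally come from a loaded measurement
    file, so the call is skipped under doctest and the returned channel name
    is only illustrative::
        >>> _get_stim_channel(None, info)  # doctest: +SKIP
        ['STI 014']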
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
_validate_type(stim_channel, 'str', "Stim channel")
stim_channel = [stim_channel]
for channel in stim_channel:
_validate_type(channel, 'str', "Each provided stim channel")
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
    while ch is not None and ch in info['ch_names']:
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI101' in info['ch_names']: # combination channel for newer systems
return ['STI101']
if 'STI 014' in info['ch_names']: # for older systems
return ['STI 014']
from ..io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
elif raise_error:
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
return stim_channel
def _get_root_dir():
"""Get as close to the repo root as possible."""
root_dir = op.abspath(op.join(op.dirname(__file__), '..'))
up_dir = op.join(root_dir, '..')
if op.isfile(op.join(up_dir, 'setup.py')) and all(
op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):
root_dir = op.abspath(up_dir)
return root_dir
def sys_info(fid=None, show_paths=False):
"""Print the system information for debugging.
This function is useful for printing system information
to help triage bugs.
Parameters
----------
fid : file-like | None
The file to write to. Will be passed to :func:`print()`.
Can be None to use :data:`sys.stdout`.
show_paths : bool
If True, print paths for each module.
Examples
--------
Running this function with no arguments prints an output that is
useful when submitting bug reports::
>>> import mne
>>> mne.sys_info() # doctest: +SKIP
Platform: Linux-4.2.0-27-generic-x86_64-with-Ubuntu-15.10-wily
Python: 2.7.10 (default, Oct 14 2015, 16:09:02) [GCC 5.2.1 20151010]
Executable: /usr/bin/python
mne: 0.12.dev0
numpy: 1.12.0.dev0+ec5bd81 {lapack=mkl_rt, blas=mkl_rt}
scipy: 0.18.0.dev0+3deede3
matplotlib: 1.5.1+1107.g1fa2697
sklearn: 0.18.dev0
nibabel: 2.1.0dev
mayavi: 4.3.1
cupy: 4.1.0
pandas: 0.17.1+25.g547750a
dipy: 0.14.0
""" # noqa: E501
ljust = 15
out = 'Platform:'.ljust(ljust) + platform.platform() + '\n'
out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
out += 'CPU:'.ljust(ljust) + ('%s: ' % platform.processor())
try:
import multiprocessing
except ImportError:
out += ('number of processors unavailable ' +
'(requires "multiprocessing" package)\n')
else:
out += '%s cores\n' % multiprocessing.cpu_count()
out += 'Memory:'.ljust(ljust)
try:
import psutil
except ImportError:
out += 'Unavailable (requires "psutil" package)'
else:
out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
out += '\n'
old_stdout = sys.stdout
capture = StringIO()
try:
sys.stdout = capture
np.show_config()
finally:
sys.stdout = old_stdout
lines = capture.getvalue().split('\n')
libs = []
for li, line in enumerate(lines):
for key in ('lapack', 'blas'):
if line.startswith('%s_opt_info' % key):
lib = lines[li + 1]
if 'NOT AVAILABLE' in lib:
lib = 'unknown'
else:
try:
lib = lib.split('[')[1].split("'")[1]
except IndexError:
pass # keep whatever it was
libs += ['%s=%s' % (key, lib)]
libs = ', '.join(libs)
for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
'nibabel', 'mayavi', 'cupy', 'pandas', 'dipy'):
if mod_name == '':
out += '\n'
continue
out += ('%s:' % mod_name).ljust(ljust)
try:
mod = __import__(mod_name)
if mod_name == 'mayavi':
# the real test
from mayavi import mlab # noqa, analysis:ignore
except Exception:
out += 'Not found\n'
else:
extra = (' (%s)' % op.dirname(mod.__file__)) if show_paths else ''
if mod_name == 'numpy':
extra = ' {%s}%s' % (libs, extra)
elif mod_name == 'matplotlib':
extra = ' {backend=%s}%s' % (mod.get_backend(), extra)
elif mod_name == 'mayavi':
try:
from pyface.qt import qt_api
except Exception:
qt_api = 'unknown'
if qt_api == 'pyqt5':
try:
from PyQt5.Qt import PYQT_VERSION_STR
qt_api += ', PyQt5=%s' % (PYQT_VERSION_STR,)
except Exception:
pass
extra = ' {qt_api=%s}%s' % (qt_api, extra)
out += '%s%s\n' % (mod.__version__, extra)
print(out, end='', file=fid)
def _get_call_line(in_verbose=False):
"""Get the call line from within a function."""
# XXX Eventually we could auto-triage whether in a `verbose` decorated
# function or not.
# NB This probably only works for functions that are undecorated,
# or decorated by `verbose`.
back = 2 if not in_verbose else 4
call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
context = inspect.getframeinfo(call_frame).code_context
context = 'unknown' if context is None else context[0].strip()
return context
| bsd-3-clause |
xuewei4d/scikit-learn | examples/linear_model/plot_sgd_iris.py | 64 | 2208 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, max_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired, edgecolor='black', s=20)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
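# Each OVA hyperplane satisfies coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0;
# plot_hyperplane solves this for x1 at both ends of the x-axis and draws the
# resulting dashed line in the class color.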
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/decomposition/pca.py | 21 | 25995 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
# Giorgio Patrini <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..externals import six
from .base import _BasePCA
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.arpack import svds
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
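# _infer_dimension_ backs the ``n_components='mle'`` option of PCA below: the
# spectrum of explained variances is scored at every candidate rank with
# Minka's approximation and the rank with the highest log-likelihood is kept.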
class PCA(_BasePCA):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space.
    It uses the LAPACK implementation of the full SVD (via `scipy.linalg.svd`),
    the ARPACK implementation of the truncated SVD (via
    `scipy.sparse.linalg.svds`) or a randomized SVD by the method of
    Halko et al. 2009, depending on which is the most efficient.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle' and svd_solver == 'full', Minka\'s MLE is used
to guess the dimension
if ``0 < n_components < 1`` and svd_solver == 'full', select the number
of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components
n_components cannot be equal to n_features for svd_solver == 'arpack'.
copy : bool (default True)
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional (default False)
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : string {'auto', 'full', 'arpack', 'randomized'}
auto :
the solver is selected by a default policy based on `X.shape` and
`n_components` which favors 'randomized' when the problem is
computationally demanding for 'full' PCA
full :
            run exact full SVD calling the standard LAPACK solver via
            `scipy.linalg.svd` and select the components by postprocessing
arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < X.shape[1]
randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float >= 0, optional (default .0)
Tolerance for singular values computed by svd_solver == 'arpack'.
.. versionadded:: 0.18.0
iterated_power : int >= 0, optional (default 4)
Number of iterations for the power method computed by
svd_solver == 'randomized'.
.. versionadded:: 0.18.0
random_state : int or RandomState instance or None (default None)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Used by svd_solver == 'arpack' or 'randomized'.
.. versionadded:: 0.18.0
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
explained_variance_.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
        Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or n_features if n_components is None.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
References
----------
For n_components == 'mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
`Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
`A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, iterated_power=4, n_components=2, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PCA(copy=True, iterated_power=4, n_components=2, random_state=None,
svd_solver='full', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(copy=True, iterated_power=4, n_components=1, random_state=None,
svd_solver='arpack', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244...]
See also
--------
KernelPCA
SparsePCA
TruncatedSVD
IncrementalPCA
"""
def __init__(self, n_components=None, copy=True, whiten=False,
svd_solver='auto', tol=0.0, iterated_power=4,
random_state=None):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
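        Examples
        --------
        A small sketch; ``X`` is assumed to be a 2D array as in the class-level
        example, and the call is skipped under doctest. The result matches
        calling ``fit(X)`` followed by ``transform(X)`` up to numerical noise::
            >>> X_new = PCA(n_components=2).fit_transform(X)  # doctest: +SKIP
            >>> X_new.shape  # doctest: +SKIP
            (6, 2)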
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
X = check_array(X, dtype=[np.float64], ensure_2d=True,
copy=self.copy)
# Handle n_components==None
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
# Handle svd_solver
svd_solver = self.svd_solver
if svd_solver == 'auto':
# Small problem, just call full PCA
if max(X.shape) <= 500:
svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
svd_solver = 'randomized'
# This is also the case of n_components in (0,1)
else:
svd_solver = 'full'
# Call different fits for either full or truncated SVD
if svd_solver == 'full':
return self._fit_full(X, n_components)
elif svd_solver in ['arpack', 'randomized']:
return self._fit_truncated(X, n_components, svd_solver)
def _fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X"""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r must be between 0 and "
"n_features=%r with svd_solver='full'"
% (n_components, n_features))
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / n_samples
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
return U, S, V
def _fit_truncated(self, X, n_components, svd_solver):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X
"""
n_samples, n_features = X.shape
if isinstance(n_components, six.string_types):
raise ValueError("n_components=%r cannot be a string "
"with svd_solver='%s'"
% (n_components, svd_solver))
elif not 1 <= n_components <= n_features:
raise ValueError("n_components=%r must be between 1 and "
"n_features=%r with svd_solver='%s'"
% (n_components, n_features, svd_solver))
elif svd_solver == 'arpack' and n_components == n_features:
            raise ValueError("n_components=%r must be strictly less than "
"n_features=%r with svd_solver='%s'"
% (n_components, n_features, svd_solver))
random_state = check_random_state(self.random_state)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if svd_solver == 'arpack':
# random init solution, as ARPACK does it internally
v0 = random_state.uniform(-1, 1, size=min(X.shape))
U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U[:, ::-1], V[::-1])
elif svd_solver == 'randomized':
# sign flipping is done inside
U, S, V = randomized_svd(X, n_components=n_components,
n_iter=self.iterated_power,
flip_sign=True,
random_state=random_state)
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = V
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S ** 2) / n_samples
total_var = np.var(X, axis=0)
self.explained_variance_ratio_ = \
self.explained_variance_ / total_var.sum()
if self.n_components_ < n_features:
self.noise_variance_ = (total_var.sum() -
self.explained_variance_.sum())
else:
self.noise_variance_ = 0.
return U, S, V
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
@deprecated("RandomizedPCA will be removed in 0.20. "
"Use PCA(svd_solver='randomized') instead. The new implementation "
"DOES NOT store whiten components_. Apply transform to get them.")
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are multiplied by
the square root of (n_samples) and divided by the singular values to
ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
xuanyuanking/spark | python/pyspark/pandas/tests/data_type_ops/test_datetime_ops.py | 7 | 8163 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DatetimeOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series(pd.date_range("1994-1-31 10:30:15", periods=3, freq="M"))
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def some_datetime(self):
return datetime.datetime(1994, 1, 31, 10, 30, 00)
def test_add(self):
self.assertRaises(TypeError, lambda: self.psser + "x")
self.assertRaises(TypeError, lambda: self.psser + 1)
self.assertRaises(TypeError, lambda: self.psser + self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
self.assert_eq(
(self.pser - self.some_datetime).dt.total_seconds().astype("int"),
self.psser - self.some_datetime,
)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.pser_psser_pairs:
if pser.dtype == np.dtype("<M8[ns]"):
self.assert_eq(
(self.pser - pser).dt.total_seconds().astype("int"),
(self.psser - psser).sort_index(),
)
else:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
self.assertRaises(TypeError, lambda: self.psser * self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
self.assertRaises(TypeError, lambda: self.psser / self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
self.assertRaises(TypeError, lambda: self.psser // self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
self.assertRaises(TypeError, lambda: self.psser % self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
self.assertRaises(TypeError, lambda: self.psser ** self.some_datetime)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
self.assert_eq(
(self.some_datetime - self.pser).dt.total_seconds().astype("int"),
self.some_datetime - self.psser,
)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 1 * self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = pd.date_range("1994-1-31 10:30:15", periods=3, freq="M")
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
self.assert_eq(self.pser.isnull(), self.psser.isnull())
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pser.astype(str), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=["a", "b", "c"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_datetime_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
denizs/torchUp | torchup/agents/agent_base.py | 1 | 22373 | '''
.. module:: AgentBase
.. moduleauthor:: Deniz Saner <[email protected]>
Base Agents
===============================
The AgentBase module implements the necessary BaseClasses of our naive and
Actor Critic Agents, reducing boilerplate in the actual implementations of our
algorithms. For actor critic agents, refer to the `BaseActorCriticAgent`_, for naive
agents to `BaseAgent`_.
'''
from __future__ import division
from collections import OrderedDict
from emoji import emojize
from torchup.env import Env
import gym
from gym.spaces import (Discrete, Box)
import inspect
from itertools import count
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import random
try:
import roboschool
except ModuleNotFoundError: # pragma: no cover
pass # pragma: no cover
import torch
import torch.nn.functional as F
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
from torch.optim.optimizer import Optimizer
# Internal imports
from torchup.base.models import (BaseModel, Variable)
from torchup.models import (DQNModel, A3CDiscreteModel, A3CContinuousModel)
from torchup.replay_memory import ReplayMemory
from torchup.utils.utils import (
Transition,
default_fallback_message,
type_error,
train_start_message,
error_message,
missing_kwarg)
import torchvision.transforms as T
logging.basicConfig(level=logging.INFO)
class BaseAgent(BaseModel):
'''
``BaseAgent`` implements the core functionality of our naive Reinforcement Learning
agents. Its ``__init__`` method is highly customizable, enabling us to
use it in a plug-n-play manner. Every single RL-agent is subclassing the
``BaseAgent``. Following parameters can be setup during initialization:
:param gamma: Which discount factor for future rewards should be used?
:param env: The env to use, defaulting to OpenAI gym's 'CartPole-v0' env.
:param rgb_input: Determines, whether the model should take 3 or 1 input channel
:param frame_skipping_rate: Frame skipping rate.
:param history_length: How many past seen frames are to be kept in the current state.
    :param training: Flag indicating whether the agent is in training.
:param checkpoint_interval: The interval at which the models and frames are saved
        to disk. A ``checkpoint_interval`` of 1000 means that if ``episode % 1000 == 0``,
the model state dict and the image frames will be saved to disk.
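    A minimal usage sketch (the subclass, ``my_env`` and the sampled action are
    purely illustrative; the base class itself raises ``NotImplementedError``
    for the predeclared methods)::
        class RandomAgent(BaseAgent):
            def select_action(self, state):
                return self.env.action_space.sample()
        agent = RandomAgent(gamma=0.95, env=my_env)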
'''
def __init__(self,
gamma=0.99,
env=None,
frame_skipping_rate=4,
history_length=4,
**kwargs):
super(BaseAgent, self).__init__()
        # Log configuration. ``dump_attrs`` decides which attrs get dumped
self.dump_attrs = [
'gamma',
]
# Are we in training?
self.training = False
# step
self.step = 0
# Checkpoint intervals:
self.checkpoint_interval = 1000
# should we use cuda?
self.use_cuda = torch.cuda.is_available()
self.fdt = torch.cuda.FloatTensor if self.use_cuda else torch.FloatTensor
self.ldt = torch.cuda.LongTensor if self.use_cuda else torch.LongTensor
# Discount factor for our update equation
self.gamma = gamma
self.env = env
def _check_kwarg(self, var_name, var, expected_class):
'''
This method checks whether the given kwarg is ``None`` or is of
invalid type. As there are multiple optimizers and loss functions,
and custom models are defined by subclassing
``torch.nn.Module``, we perform the type check by asserting that ``var``
is a subclass of ``expected_class``. In case we encounter an error,
we first log our custom error message and then
raise a ``TypeError``.
:param var_name: Variable name which is passed into the respective
error message generator function.
:param var: The variable's value
:param expected_class: The expected class of the variable
Usage:
=======
.. code:: python
    # Raises a ``TypeError`` if ``model`` is ``None`` or not a
    # subclass of ``expected_class``; otherwise execution continues.
    self._check_kwarg('model', model, expected_class)
    # do something
'''
# First let's identify where the call came from:
stack = inspect.stack()
cls_name = stack[1][0].f_locals['self'].__class__
caller_fn = stack[1][0].f_code.co_name
# check for None:
if var is None:
err_message = error_message(
missing_kwarg(var_name,
expected_class,
caller_fn,
cls_name))
logging.error(err_message)
raise TypeError('missing:{}'.format(var_name))
# type check as described in the docstring
try:
assert issubclass(var, expected_class)
except AssertionError:
err_message = error_message(
type_error(var_name, var,
expected_class,
caller_fn,
cls_name))
logging.error(err_message)
raise TypeError('type_error:{}'.format(var_name))
def optimize(self):
'''
Predeclared method used to perform optimization on the model as the
algorithm foresees.
'''
raise NotImplementedError('BaseAgent does not implement this method')
def select_action(self, state):
'''
Predeclared method used to select an action based on an agent's policy.
'''
raise NotImplementedError('BaseAgent does not implement this method')
def train(self, num_episodes=10000):
'''
Predeclared method used to train the agent for a specific
task.
'''
raise NotImplementedError('BaseAgent does not implement this method')
def _process(self, state=None):
'''
Predeclared method
'''
raise NotImplementedError('BaseAgent does not implement this method')
class BaseA3CWorker(BaseAgent):
'''
This class implements the base class of our ``A3CWorker``s.
'''
def __init__(self,
regularization_factor=0.01,
rollout_steps=5,
optimizer=None,
server=None,
uid=None,
input_height=84,
history_length=3,
**kwargs):
super(BaseA3CWorker, self).__init__(uid=uid,
rgb_input=True,
history_length=history_length,
**kwargs)
# Now let's initialize our agent's model. We will need to pass in the `env`'s
# `action_space` and the history length, which the model will internally
# use to calculate the input channels of the first convolutional layer:
# `history_length * 3 if rgb_input, else * 1`
# TODO: implement check, whether discrete or not:
if isinstance(self.env.action_space, Box):
msg = error_message('No continuous action model yet!')
logging.error(msg)
return
else:
self.model = A3CDiscreteModel(action_space=self.env.action_space,
history_length=history_length,
input_height=input_height)
# How many steps should our learners take during our _rollout?
self.rollout_steps = rollout_steps
# We need a copy of our server in order to interact with it over the
# course of our training. Let's throw an error if `server` is `None`
# and assign it to the Thread instance:
assert server is not None
self.server = server
assert optimizer is not None
self.optimizer = optimizer
# Now let's ensure that our worker has a thread uid and build the thread
# name. We will follow the convention of prefixing our threads:
# 'worker-{thread_id}'
assert uid is not None and type(uid) is int
self.uid = uid
self.name = 'worker-{}'.format(uid)
# Thread step_count. This determines whether our learner has reached max
# steps
self.step_count = 1
# Let's set our regularization_factor. It is multiplied with the policy's
# entropy and hence controls the weight of the regularization.
assert regularization_factor is not None
self.regularization_factor = regularization_factor
# Define resize
self.resize = T.Compose([
T.ToPILImage(),
T.Scale(input_height, interpolation=Image.CUBIC),
T.ToTensor()])
class BaseDQNAgent(BaseAgent):
'''
``BaseDQNAgent`` implements DQN specific functionality.
'''
def __init__(self,
eps_decay_start=1.0,
eps_decay_stop=0.1,
final_exploration_frame=int(1e6),
model=None,
memory_cap=int(1e6),
sample_batch_size=32,
loss_function=nn.MSELoss,
optimizer=None,
learning_rate=0.0002,
target_model_update_freq=10000,
**kwargs):
'''
:param gamma: Which discount factor for future rewards should be used?
:param env: Env instance
:param eps_decay_start: Start value for eps
:param eps_decay_stop: Final value for eps
:param final_exploration_frame: Where to stop annealing the eps value of eps-greedy
:param model: The model class that the agent should use. Note that you should
simply pass in the class into the constructor. Instantiation is handled
in ``BaseAgent``
:param memory_cap: How many entries should fit into our replay memory?
:param sample_batch_size: Which size should our random batch samples have?
:param loss_function: The loss function CLASS the agent should use.
:param optimizer: The optimizer CLASS the agent should use.
:param target_model_update_freq: The interval for the target model hard update
:param learning_rate: The learning rate the optimizer should use.
:param num_training_episodes: The number of episodes to complete during training.
:param training: Flag indicating whether the agent is in training. This is
used to add exploratory noise or follow an epsilon-greedy policy during training.
:param checkpoint_interval: The interval at which the models and frames are saved
to disk. A ``checkpoint_interval`` of 1000 means that if ``episode % 1000 == 0``,
the model state dict and the image frames will be saved to disk.
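Usage:
=======
A minimal, illustrative sketch (``env`` is assumed to be an already
constructed ``Env`` instance; the classes shown are just one valid combination):
.. code:: python
    agent = BaseDQNAgent(env=env,
                         model=DQNModel,
                         optimizer=optim.RMSprop,
                         loss_function=nn.MSELoss,
                         learning_rate=0.0002)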
'''
super(BaseDQNAgent, self).__init__(**kwargs)
self.dump_attrs += [
'target_model_update_freq',
'eps_decay_start',
'eps_decay_stop',
'final_exploration_frame',
'loss_function',
'optimizer',
'memory_cap',
'sample_batch_size',
]
self.incompatible_attrs += [
'loss_function',
'optimizer'
]
# Target model update factor and counter
self.target_model_update_freq = target_model_update_freq
self.n_backprop = 0
# The agent's model. Expects a `torch.nn.Module`
# Also, if cuda is enabled, we want to offload computation to our GPU.
# Hence, after instantiating our model, we cast it via `.type(self.fdt)`,
# which resolves to a CUDA tensor type when a GPU is available.
# Let's check whether our model is valid
self._check_kwarg('model', model, torch.nn.Module)
# Afterwards, we instantiate the model with the history length and the
# `action_space` we obtain from `self.env`. This keeps
# the api modular and open for further extensions.
hl = self.env.observation_space.shape[2]
self.model = model(history_length=hl,
action_space=self.env.action_space).type(self.fdt)
self.target_model = model(history_length=hl,
action_space=self.env.action_space).type(self.fdt)
# Before we start any training, we need to ensure that our target model
# has the same weights and biases throughout its modules. We achieve
# this by simply loading the state dict of our online model via `load_state_dict`.
# This dict contains the values of all weights and biases.
self.target_model.load_state_dict(self.model.state_dict())
# Replay mem setup
self.memory_cap = memory_cap
self.memory = ReplayMemory(memory_cap)
self.sample_batch_size = sample_batch_size
# let's check whether our loss function is valid
self._check_kwarg(
'loss_function',
loss_function,
torch.nn.modules.loss._Loss)
self.loss_function = loss_function()
# let's check whether our optimizer is valid
self._check_kwarg(
'optimizer',
optimizer,
Optimizer)
self.optimizer = optimizer(self.model.parameters(),
lr=learning_rate)
# eps decay
if not kwargs.get('eps_decay_start'):
msg = default_fallback_message('eps_decay_start')
logging.debug(msg)
if not kwargs.get('eps_decay_stop'):
msg = default_fallback_message('eps_decay_stop')
logging.debug(msg)
if not kwargs.get('final_exploration_frame'):
msg = default_fallback_message('final_exploration_frame')
logging.debug(msg)
self.eps_decay_start = eps_decay_start
self.eps_decay_stop = eps_decay_stop
self.final_exploration_frame = final_exploration_frame
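# For reference, subclasses typically anneal eps linearly between these
# bounds, e.g. (a sketch, not used by this base class itself):
# frac = min(1.0, self.step / self.final_exploration_frame)
# eps = self.eps_decay_start + frac * (self.eps_decay_stop - self.eps_decay_start)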
class BaseActorCriticAgent(BaseAgent):
'''
BaseActorCriticAgent implements the scaffold for our Actor Critic agents.
:param memory_cap: Replay memory capacity. Defaults to 1.000.000.
:param sample_batch_size: Mini batch size of optimization updates. Defaults
to 16.
:param gamma: Discount factor :math:`\gamma`. Defaults to 0.99.
:param actor: Actor model. Defaults to ``None``.
:param critic: Critic model. Defaults to ``None``.
:param target_model_update_factor: Factor :math:`\\tau` of soft param-updates.
:param env: Env. Defaults to ``MountainCarContinuous-v0``.
:param optimizer: Optimizer to be used. Defaults to ``torch.optim.Adam``.
:param learning_rate: The learning rate passed into the ``optimizer``.
:param loss_function: The loss function to be used. Defaults to ``None``.
:param enable_frame_skipping: Whether frame skipping is enabled. Defaults to
``True``.
:param num_training_episodes: Numbers of episodes to complete during training.
:param frame_skipping_rate: The frameskipping rate, determining on which
frequency the agent picks actions. A value of ``3`` means that the agent
picks actions on every third frame.
:param rgb_input: Whether to use the 3 channel RGB representation of a frame
as input. Defaults to ``True``.
:param training: Flag indicating whether the agent is in training. This is
used to add exploratory noise or follow an epsilon greedy policy during training.
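Usage:
=======
A minimal, illustrative sketch (``MyActor`` and ``MyCritic`` are hypothetical
``torch.nn.Module`` subclasses whose constructors accept ``action_space`` and
``history_length`` keyword arguments, and ``some_env`` is an already
constructed ``Env`` instance):
.. code:: python
    agent = BaseActorCriticAgent(actor=MyActor,
                                 critic=MyCritic,
                                 loss_function=nn.MSELoss,
                                 env=some_env)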
'''
def __init__(self,
memory_cap=1000000,
sample_batch_size=16,
gamma=0.99,
actor=None,
critic=None,
target_model_update_factor=0.0001,
env='MountainCarContinuous-v0',
optimizer=optim.Adam,
learning_rate=0.0002,
loss_function=None,
enable_frame_skipping=True,
frame_skipping_rate=3,
rgb_input=True,
training=True,
num_training_episodes=100,
history_length=3,
**kwargs):
# Fortunately, most of the heavy lifting is done by our `BaseAgent` class's
# `__init__` except for our models - actor, actor_target, critic, critic target,
# which we have to check for validity ourselves. But first let's call our
# super class's init method:
super(BaseActorCriticAgent, self).__init__(gamma=gamma,
env=env,
rgb_input=rgb_input,
frame_skipping_rate=frame_skipping_rate,
training=training,
enable_frame_skipping=enable_frame_skipping,
history_length=history_length,
**kwargs)
# Log configuration. `dump_attrs` decides which attrs get dumped
self.dump_attrs = [
'env',
'rgb_input',
'enable_frame_skipping',
'frame_skipping_rate',
'num_training_episodes',
'target_model_update_factor',
'actor',
'actor_target',
'critic',
'critic_target',
'memory_cap',
'sample_batch_size',
'gamma',
'use_cuda',
'loss_function',
'optimizer',
'learning_rate'
]
# Incompatible_attrs points to attributes, which are class instances.
# On those, we need to call `__class__.__name__`
self.incompatible_attrs = [
'env',
'actor',
'actor_target',
'critic',
'critic_target',
'loss_function',
'optimizer'
]
# How many training episodes should we complete?
self.num_training_episodes = num_training_episodes
# Let's instantiate the replay memory with the given capacity and set
# its sample batch size.
self.memory_cap = memory_cap
self.memory = ReplayMemory(memory_cap)
self.sample_batch_size = sample_batch_size
# The target models' parameters are updated via soft updates.
# They are adjusted towards the online models' params by the following equation,
# utilizing the param target_model_update_factor (fac):
# `new_target_params = old_target_params * (1 - fac) + online_params * fac`.
self.target_model_update_factor = target_model_update_factor
# `BaseAgent` comes along with a convenient method to validate our
# kwargs: `_check_kwarg`. Let's use it to validate our actor and critic models:
self._check_kwarg(
'actor',
actor,
torch.nn.Module)
self.actor = actor(action_space=self.env.action_space,
history_length=history_length).type(self.fdt)
self.actor_target = actor(action_space=self.env.action_space,
history_length=history_length).type(self.fdt)
self._check_kwarg(
'critic',
critic,
torch.nn.Module)
self.critic = critic(action_space=self.env.action_space,
history_length=history_length).type(self.fdt)
self.critic_target = critic(action_space=self.env.action_space,
history_length=history_length).type(self.fdt)
# Initialize loss function
# let's check whether our loss function is valid
self._check_kwarg(
'loss_function',
loss_function,
torch.nn.modules.loss._Loss)
self.critic_loss_function = loss_function()
# Initialize optimizers. We won't initialize optimizers for our target
# models as we will soft update those.
self.learning_rate = learning_rate
self.actor_optimizer = optimizer(self.actor.parameters(),
lr=learning_rate)
self.critic_optimizer = optimizer(self.critic.parameters(),
lr=learning_rate)
def _soft_update(self):
# Aggregate our variables:
# We'll need the update factor and the online and target models'
# `state_dict`s:
fac = self.target_model_update_factor
a_p = self.actor.state_dict()
c_p = self.critic.state_dict()
a_t_p = self.actor_target.state_dict()
c_t_p = self.critic_target.state_dict()
# Construct the new parameters without mutating the existing state dicts.
actor_update = OrderedDict({key: val.mul(1 - fac) + a_p[key].mul(fac)
for key, val in a_t_p.items()})
critic_update = OrderedDict({key: val.mul(1 - fac) + c_p[key].mul(fac)
for key, val in c_t_p.items()})
# Load in the new params
self.actor_target.load_state_dict(actor_update)
self.critic_target.load_state_dict(critic_update)
def optimize(self):
'''
Predeclared method used to perform optimization on the model as the
algorithm foresees.
'''
raise NotImplementedError('BaseActorCriticAgent does not implement this method')
def select_action(self, state):
'''
Predeclared method used to select an action based on an agent's policy.
'''
raise NotImplementedError('BaseActorCriticAgent does not implement this method')
def _process(self, state=None):
'''
Predeclared method
'''
raise NotImplementedError('BaseActorCriticAgent does not implement this method')
def train(self, num_episodes=10000):
'''
Predeclared method used to train the agent for a specific
task.
'''
raise NotImplementedError('BaseActorCriticAgent does not implement this method')
| bsd-2-clause |
rahuldhote/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
chrsrds/scikit-learn | examples/inspection/plot_permutation_importance.py | 1 | 7490 | """
================================================================
Permutation Importance vs Random Forest Feature Importance (MDI)
================================================================
In this example, we will compare the impurity-based feature importance of
:class:`~sklearn.ensemble.RandomForestClassifier` with the
permutation importance on the titanic dataset using
:func:`~sklearn.inspection.permutation_importance`. We will show that the
impurity-based feature importance can inflate the importance of numerical
features.
Furthermore, the impurity-based feature importance of random forests suffers
from being computed on statistics derived from the training dataset: the
importances can be high even for features that are not predictive of the target
variable, as long as the model has the capacity to use them to overfit.
This example shows how to use Permutation Importances as an alternative that
can mitigate those limitations.
.. topic:: References:
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. https://doi.org/10.1023/A:1010933404324
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
##############################################################################
# Data Loading and Feature Engineering
# ------------------------------------
# Let's use pandas to load a copy of the titanic dataset. The following shows
# how to apply separate preprocessing on numerical and categorical features.
#
# We further include two random variables that are not correlated in any way
# with the target variable (``survived``):
#
# - ``random_num`` is a high cardinality numerical variable (as many unique
# values as records).
# - ``random_cat`` is a low cardinality categorical variable (3 possible
# values).
X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True)
X['random_cat'] = np.random.randint(3, size=X.shape[0])
X['random_num'] = np.random.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42)
categorical_pipe = Pipeline([
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
numerical_pipe = Pipeline([
('imputer', SimpleImputer(strategy='mean'))
])
preprocessing = ColumnTransformer(
[('cat', categorical_pipe, categorical_columns),
('num', numerical_pipe, numerical_columns)])
rf = Pipeline([
('preprocess', preprocessing),
('classifier', RandomForestClassifier(random_state=42))
])
rf.fit(X_train, y_train)
##############################################################################
# Accuracy of the Model
# ---------------------
# Prior to inspecting the feature importances, it is important to check that
# the model predictive performance is high enough. Indeed, there would be little
# interest in inspecting the important features of a non-predictive model.
#
# Here one can observe that the train accuracy is very high (the forest model
# has enough capacity to completely memorize the training set) but it can still
# generalize well enough to the test set thanks to the built-in bagging of
# random forests.
#
# It might be possible to trade some accuracy on the training set for a
# slightly better accuracy on the test set by limiting the capacity of the
# trees (for instance by setting ``min_samples_leaf=5`` or
# ``min_samples_leaf=10``) so as to limit overfitting while not introducing too
# much underfitting.
#
# However let's keep our high capacity random forest model for now so as to
# illustrate some pitfalls with feature importance on variables with many
# unique values.
print("RF train accuracy: %0.3f" % rf.score(X_train, y_train))
print("RF test accuracy: %0.3f" % rf.score(X_test, y_test))
##############################################################################
# Tree's Feature Importance from Mean Decrease in Impurity (MDI)
# --------------------------------------------------------------
# The impurity-based feature importance ranks the numerical features to be the
# most important features. As a result, the non-predictive ``random_num``
# variable is ranked the most important!
#
# This problem stems from two limitations of impurity-based feature
# importances:
#
# - impurity-based importances are biased towards high cardinality features;
# - impurity-based importances are computed on training set statistics and
# therefore do not reflect the ability of a feature to be useful to make
# predictions that generalize to the test set (when the model has enough
# capacity).
ohe = (rf.named_steps['preprocess']
.named_transformers_['cat']
.named_steps['onehot'])
feature_names = ohe.get_feature_names(input_features=categorical_columns)
feature_names = np.r_[feature_names, numerical_columns]
tree_feature_importances = (
rf.named_steps['classifier'].feature_importances_)
sorted_idx = tree_feature_importances.argsort()
y_ticks = np.arange(0, len(feature_names))
fig, ax = plt.subplots()
ax.barh(y_ticks, tree_feature_importances[sorted_idx])
ax.set_yticklabels(feature_names[sorted_idx])
ax.set_yticks(y_ticks)
ax.set_title("Random Forest Feature Importances (MDI)")
fig.tight_layout()
plt.show()
##############################################################################
# As an alternative, the permutation importances of ``rf`` are computed on a
# held out test set. This shows that the low cardinality categorical feature,
# ``sex`` is the most important feature.
#
# Also note that both random features have very low importances (close to 0) as
# expected.
result = permutation_importance(rf, X_test, y_test, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
fig, ax = plt.subplots()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=X_test.columns[sorted_idx])
ax.set_title("Permutation Importances (test set)")
fig.tight_layout()
plt.show()
##############################################################################
# It is also possible to compute the permutation importances on the training
# set. This reveals that ``random_num`` gets a significantly higher importance
# ranking than when computed on the test set. The difference between those two
# plots is a confirmation that the RF model has enough capacity to use that
# random numerical feature to overfit. You can further confirm this by
# re-running this example with constrained RF with min_samples_leaf=10.
result = permutation_importance(rf, X_train, y_train, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
fig, ax = plt.subplots()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=X_train.columns[sorted_idx])
ax.set_title("Permutation Importances (train set)")
fig.tight_layout()
plt.show()
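##############################################################################
# As an optional, illustrative follow-up (not part of the original example),
# the overfitting claim above can be checked by refitting the same
# preprocessing pipeline with a constrained forest, e.g.
# ``min_samples_leaf=10``, and comparing train and test accuracy again.
rf_constrained = Pipeline([
    ('preprocess', preprocessing),
    ('classifier', RandomForestClassifier(min_samples_leaf=10,
                                          random_state=42))
])
rf_constrained.fit(X_train, y_train)
print("Constrained RF train accuracy: %0.3f"
      % rf_constrained.score(X_train, y_train))
print("Constrained RF test accuracy: %0.3f"
      % rf_constrained.score(X_test, y_test))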
| bsd-3-clause |
shusenl/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
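Examples
--------
A minimal, illustrative sketch (analogous to the SVC example elsewhere in
this module; only assignments are shown, so no doctest output is implied).
>>> import numpy as np
>>> from sklearn.svm import LinearSVC
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = LinearSVC(random_state=0).fit(X, y)
>>> pred = clf.predict([[-0.8, -1]])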
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the standard SVR
loss, while 'squared_epsilon_insensitive' is the squared
epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
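Examples
--------
A minimal, illustrative sketch (analogous to the SVR example elsewhere in
this module; only assignments are shown, so no doctest output is implied).
>>> import numpy as np
>>> from sklearn.svm import LinearSVR
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(10, 5)
>>> y = rng.randn(10)
>>> reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
>>> y_pred = reg.predict(X)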
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
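Examples
--------
A minimal, illustrative sketch (only assignments are shown, so no doctest
output is implied).
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.random.RandomState(0).randn(20, 2)
>>> clf = OneClassSVM(nu=0.1, gamma=0.1).fit(X)
>>> inlier_flags = clf.predict(X)
>>> scores = clf.decision_function(X)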
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
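# Illustrative usage sketch (not part of the original scikit-learn module):
# fit a OneClassSVM on inlier data and flag outliers. predict() returns +1
# for inliers and -1 for outliers; decision_function() gives the signed
# distance of each sample to the learned boundary. The data below is made up.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(100, 2)              # points clustered near the origin
    X_new = np.array([[0.1, -0.2], [4.0, 4.0]])    # one likely inlier, one obvious outlier
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    clf.fit(X_train)
    print(clf.predict(X_new))             # expected roughly [ 1 -1]
    print(clf.decision_function(X_new))   # positive for inliers, negative for outliers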
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_artifacts_correction_ica.py | 1 | 13053 | """
.. _tut_artifacts_correct_ica:
Artifact Correction with ICA
============================
ICA finds directions in the feature space
corresponding to projections with high non-Gaussianity. We thus obtain
a decomposition into independent components, and the artifact's contribution
is localized in only a small number of components.
These components have to be correctly identified and removed.
If EOG or ECG recordings are available, they can be used in ICA to
automatically select the corresponding artifact components from the
decomposition. To do so, you have to first build an :class:`mne.Epochs` object
around blink or heartbeat events.
ICA is implemented in MNE using the :class:`mne.preprocessing.ICA` class,
which we will review here.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import ICA
from mne.preprocessing import create_eog_epochs, create_ecg_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# 1Hz high pass is often helpful for fitting ICA
raw.filter(1., 40., n_jobs=2, fir_design='firwin')
picks_meg = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
###############################################################################
# Before applying artifact correction please learn about your actual artifacts
# by reading :ref:`tut_artifacts_detect`.
#
# .. warning:: ICA is sensitive to low-frequency drifts and therefore
# requires the data to be high-pass filtered prior to fitting.
# Typically, a cutoff frequency of 1 Hz is recommended. Note that
# FIR filters prior to MNE 0.15 used the ``'firwin2'`` design
# method, which generally produces rather shallow filters that
# might not work for ICA processing. Therefore, it is recommended
# to use IIR filters for MNE up to 0.14. In MNE 0.15, FIR filters
# can be designed with the ``'firwin'`` method, which generally
# produces much steeper filters. This method will be the default
# FIR design method in MNE 0.16. In MNE 0.15, you need to
# explicitly set ``fir_design='firwin'`` to use this method. This
# is the recommended filter method for ICA preprocessing.
###############################################################################
# Fit ICA
# -------
#
# ICA parameters:
n_components = 25 # if float, select n_components by explained variance of PCA
method = 'fastica' # for comparison with EEGLAB try "extended-infomax" here
decim = 3 # we need sufficient statistics, not all time points -> saves time
# we will also set state of the random number generator - ICA is a
# non-deterministic algorithm, but we want to have the same decomposition
# and the same order of components each time this tutorial is run
random_state = 23
###############################################################################
# Define the ICA object instance
ica = ICA(n_components=n_components, method=method, random_state=random_state)
print(ica)
###############################################################################
# we avoid fitting ICA on crazy environmental artifacts that would
# dominate the variance and decomposition
reject = dict(mag=5e-12, grad=4000e-13)
ica.fit(raw, picks=picks_meg, decim=decim, reject=reject)
print(ica)
###############################################################################
# Plot ICA components
ica.plot_components() # can you spot some potential bad guys?
###############################################################################
# Component properties
# --------------------
#
# Let's take a closer look at the properties of the first three independent components.
# first, component 0:
ica.plot_properties(raw, picks=0)
###############################################################################
# we can see that the data were filtered so the spectrum plot is not
# very informative, let's change that:
ica.plot_properties(raw, picks=0, psd_args={'fmax': 35.})
###############################################################################
# we can also take a look at multiple different components at once:
ica.plot_properties(raw, picks=[1, 2], psd_args={'fmax': 35.})
###############################################################################
# Instead of opening individual figures with component properties, we can
# also pass an instance of Raw or Epochs in the ``inst`` argument to
# ``ica.plot_components``. This would allow us to open component properties
# interactively by clicking on individual component topomaps. In the notebook
# this works only when running matplotlib in interactive mode (``%matplotlib``).
# uncomment the code below to test the interactive mode of plot_components:
# ica.plot_components(picks=range(10), inst=raw)
###############################################################################
# Advanced artifact detection
# ---------------------------
#
# Let's use a more efficient way to find artefacts
eog_average = create_eog_epochs(raw, reject=dict(mag=5e-12, grad=4000e-13),
picks=picks_meg).average()
eog_epochs = create_eog_epochs(raw, reject=reject) # get single EOG trials
eog_inds, scores = ica.find_bads_eog(eog_epochs) # find via correlation
ica.plot_scores(scores, exclude=eog_inds) # look at r scores of components
# we can see that only one component is highly correlated and that this
# component got detected by our correlation analysis (red).
ica.plot_sources(eog_average, exclude=eog_inds) # look at source time course
###############################################################################
# We can take a look at the properties of that component, now using the
# data epoched with respect to EOG events.
# We will also use a little bit of smoothing along the trials axis in the
# epochs image:
ica.plot_properties(eog_epochs, picks=eog_inds, psd_args={'fmax': 35.},
image_args={'sigma': 1.})
###############################################################################
# That component is showing a prototypical average vertical EOG time course.
#
# Pay attention to the labels, a customized read-out of the
# ``mne.preprocessing.ICA.labels_``:
print(ica.labels_)
###############################################################################
# These labels were used by the plotters and are added automatically
# by artifact detection functions. You can also manually edit them to annotate
# components.
#
# Now let's see how we would modify our signals if we removed this component
# from the data.
ica.plot_overlay(eog_average, exclude=eog_inds, show=False)
# red -> before, black -> after. Yes! We remove quite a lot!
# to definitely register this component as a bad one to be removed
# there is the ``ica.exclude`` attribute, a simple Python list
ica.exclude.extend(eog_inds)
# from now on the ICA will reject this component even if no exclude
# parameter is passed, and this information will be stored to disk
# on saving
# uncomment this for reading and writing
# ica.save('my-ica.fif')
# ica = read_ica('my-ica.fif')
###############################################################################
# Note that nothing is yet removed from the raw data. To remove the effects of
# the rejected components,
# :meth:`the apply method <mne.preprocessing.ICA.apply>` must be called.
# Here we apply it on a copy of the first ten seconds, so that the rest of
# this tutorial still works as intended.
raw_copy = raw.copy().crop(0, 10)
ica.apply(raw_copy)
raw_copy.plot() # check the result
###############################################################################
# Exercise: find and remove ECG artifacts using ICA!
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ica.plot_properties(ecg_epochs, picks=ecg_inds, psd_args={'fmax': 35.})
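# One possible completion of the exercise (left commented out so you can try
# it yourself first; which components are found depends on the data and on
# the ICA solution):
#
# ica.exclude.extend(ecg_inds)
# raw_ecg_clean = raw.copy().crop(0, 10)
# ica.apply(raw_ecg_clean)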
###############################################################################
# What if we don't have an EOG channel?
# -------------------------------------
#
# We could either:
#
# 1. make a bipolar reference from frontal EEG sensors and use it as a virtual
#    EOG channel. This can be tricky though as you can only hope that the
#    frontal EEG channels only reflect EOG and not brain dynamics in the
#    prefrontal cortex. (A rough sketch of this approach is given below.)
# 2. go for a semi-automated approach, using template matching.
#
# In MNE-Python option 2 is easily achievable and it might give better results,
# so let's have a look at it.
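# Before that, for completeness, option 1 might look roughly like the
# commented sketch below. The channel names are hypothetical and depend on
# your montage; the virtual channel is only used for scoring the components:
#
# raw_veog = mne.set_bipolar_reference(raw.copy(), anode='EEG 001',
#                                      cathode='EEG 002', ch_name='vEOG')
# veog_inds, veog_scores = ica.find_bads_eog(raw_veog, ch_name='vEOG')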
from mne.preprocessing.ica import corrmap # noqa
###############################################################################
# The idea behind corrmap is that artefact patterns are similar across subjects
# and can thus be identified by correlating the different patterns resulting
# from each solution with a template. The procedure is therefore
# semi-automatic. :func:`mne.preprocessing.corrmap` hence takes a list of
# ICA solutions and a template, that can be an index or an array.
#
# As we don't have different subjects or runs available today, here we will
# simulate ICA solutions from different subjects by fitting ICA models to
# different parts of the same recording. Then we will use one of the components
# from our original ICA as a template in order to detect sufficiently similar
# components in the simulated ICAs.
#
# The following block of code simulates having ICA solutions from different
# runs/subjects so it should not be used in real analysis - use independent
# data sets instead.
# We'll start by simulating a group of subjects or runs from a subject
start, stop = [0, raw.times[-1]]
intervals = np.linspace(start, stop, 4, dtype=np.float)
icas_from_other_data = list()
raw.pick_types(meg=True, eeg=False) # take only MEG channels
for ii, start in enumerate(intervals):
if ii + 1 < len(intervals):
stop = intervals[ii + 1]
print('fitting ICA from {0} to {1} seconds'.format(start, stop))
this_ica = ICA(n_components=n_components, method=method).fit(
raw, start=start, stop=stop, reject=reject)
icas_from_other_data.append(this_ica)
###############################################################################
# Remember, don't do this at home! Start by reading in a collection of ICA
# solutions instead. Something like:
#
# ``icas = [mne.preprocessing.read_ica(fname) for fname in ica_fnames]``
print(icas_from_other_data)
###############################################################################
# We use our original ICA as reference.
reference_ica = ica
###############################################################################
# Investigate our reference ICA:
reference_ica.plot_components()
###############################################################################
# Which one is the bad EOG component?
# Here we rely on our previous detection algorithm. You would need to decide
# yourself if no automatic detection was available.
reference_ica.plot_sources(eog_average, exclude=eog_inds)
###############################################################################
# Indeed it looks like an EOG, also in the average time course.
#
# We construct a list where our reference run is the first element. Then we
# can detect similar components from the other runs (the other ICA objects)
# using :func:`mne.preprocessing.corrmap`. So our template must be a tuple like
# (reference_run_index, component_index):
icas = [reference_ica] + icas_from_other_data
template = (0, eog_inds[0])
###############################################################################
# Now we can run the CORRMAP algorithm.
fig_template, fig_detected = corrmap(icas, template=template, label="blinks",
show=True, threshold=.8, ch_type='mag')
###############################################################################
# Nice, we have found similar ICs from the other (simulated) runs!
# In this way, you can detect a type of artifact semi-automatically for example
# for all subjects in a study.
# The detected template can also be retrieved as an array and stored; this
# array can be used as an alternative template to
# :func:`mne.preprocessing.corrmap`.
eog_component = reference_ica.get_components()[:, eog_inds[0]]
###############################################################################
# If you calculate a new ICA solution, you can provide this array instead of
# specifying the template in reference to the list of ICA objects you want
# to run CORRMAP on. (Of course, the retrieved component map arrays can
# also be used for other purposes than artifact correction.) A short sketch
# of reusing the stored array follows at the end of this section.
#
# You can also use SSP to correct for artifacts. It is a bit simpler and
# faster but also less precise than ICA and requires that you know the event
# timing of your artifact.
# See :ref:`tut_artifacts_correct_ssp`.
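# Coming back to the stored ``eog_component`` array from above: in a later
# analysis it could be passed to corrmap directly as the template (sketch
# only; ``new_icas`` is a placeholder for whatever list of fitted ICA objects
# you have at that point):
#
# corrmap(new_icas, template=eog_component, label="blinks",
#         show=True, threshold=.8, ch_type='mag')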
| bsd-3-clause |
pv/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
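# Small illustrative check of ``alt_nnmf`` (a sketch, not part of the original
# benchmark): factor a random non-negative matrix and print the Frobenius
# reconstruction error. Wrapped in a function so nothing extra runs when the
# benchmark itself is executed.
def _demo_alt_nnmf():
    V = np.abs(np.random.RandomState(0).standard_normal((20, 12)))
    W, H = alt_nnmf(V, r=4, tol=1e-3)
    print("demo reconstruction error: %.4f" % norm(V - np.dot(W, H)))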
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
            # dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/util/test_deprecate_kwarg.py | 8 | 2043 | import pytest
from pandas.util._decorators import deprecate_kwarg
import pandas._testing as tm
@deprecate_kwarg("old", "new")
def _f1(new=False):
return new
_f2_mappings = {"yes": True, "no": False}
@deprecate_kwarg("old", "new", _f2_mappings)
def _f2(new=False):
return new
def _f3_mapping(x):
return x + 1
@deprecate_kwarg("old", "new", _f3_mapping)
def _f3(new=0):
return new
@pytest.mark.parametrize("key,klass", [("old", FutureWarning), ("new", None)])
def test_deprecate_kwarg(key, klass):
x = 78
with tm.assert_produces_warning(klass):
assert _f1(**{key: x}) == x
@pytest.mark.parametrize("key", list(_f2_mappings.keys()))
def test_dict_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == _f2_mappings[key]
@pytest.mark.parametrize("key", ["bogus", 12345, -1.23])
def test_missing_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == key
@pytest.mark.parametrize("x", [1, -1.4, 0])
def test_callable_deprecate_kwarg(x):
with tm.assert_produces_warning(FutureWarning):
assert _f3(old=x) == _f3_mapping(x)
def test_callable_deprecate_kwarg_fail():
msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)"
with pytest.raises(TypeError, match=msg):
_f3(old="hello")
def test_bad_deprecate_kwarg():
msg = "mapping from old to new argument values must be dict or callable!"
with pytest.raises(TypeError, match=msg):
@deprecate_kwarg("old", "new", 0)
def f4(new=None):
return new
@deprecate_kwarg("old", None)
def _f4(old=True, unchanged=True):
return old, unchanged
@pytest.mark.parametrize("key", ["old", "unchanged"])
def test_deprecate_keyword(key):
x = 9
if key == "old":
klass = FutureWarning
expected = (x, True)
else:
klass = None
expected = (True, x)
with tm.assert_produces_warning(klass):
assert _f4(**{key: x}) == expected
| bsd-3-clause |
snap-stanford/ogb | ogb/graphproppred/dataset.py | 1 | 7790 | import pandas as pd
import shutil, os
import numpy as np
import os.path as osp
from ogb.utils.url import decide_download, download_url, extract_zip
from ogb.io.read_graph_raw import read_csv_graph_raw, read_binary_graph_raw
import torch
class GraphPropPredDataset(object):
def __init__(self, name, root = 'dataset', meta_dict = None):
'''
- name (str): name of the dataset
- root (str): root directory to store the dataset folder
- meta_dict: dictionary that stores all the meta-information about data. Default is None,
              but when something is passed, it uses its information. Useful for debugging by external contributors.
'''
        self.name = name ## original name, e.g., ogbg-hiv
if meta_dict is None:
self.dir_name = '_'.join(name.split('-')) ## replace hyphen with underline, e.g., ogbg_hiv
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
if not self.name in master:
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
# check version
# First check whether the dataset has been already downloaded or not.
# If so, check whether the dataset version is the newest or not.
# If the dataset is not the newest version, notify this to the user.
if osp.isdir(self.root) and (not osp.exists(osp.join(self.root, 'RELEASE_v' + str(self.meta_info['version']) + '.txt'))):
print(self.name + ' has been updated.')
if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
shutil.rmtree(self.root)
self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21
self.num_tasks = int(self.meta_info['num tasks'])
self.eval_metric = self.meta_info['eval metric']
self.task_type = self.meta_info['task type']
self.num_classes = self.meta_info['num classes']
self.binary = self.meta_info['binary'] == 'True'
super(GraphPropPredDataset, self).__init__()
self.pre_process()
def pre_process(self):
processed_dir = osp.join(self.root, 'processed')
raw_dir = osp.join(self.root, 'raw')
pre_processed_file_path = osp.join(processed_dir, 'data_processed')
if os.path.exists(pre_processed_file_path):
loaded_dict = torch.load(pre_processed_file_path, 'rb')
self.graphs, self.labels = loaded_dict['graphs'], loaded_dict['labels']
else:
### check download
if self.binary:
# npz format
has_necessary_file = osp.exists(osp.join(self.root, 'raw', 'data.npz'))
else:
# csv file
has_necessary_file = osp.exists(osp.join(self.root, 'raw', 'edge.csv.gz'))
### download
if not has_necessary_file:
url = self.meta_info['url']
if decide_download(url):
path = download_url(url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
# delete folder if there exists
try:
shutil.rmtree(self.root)
except:
pass
shutil.move(osp.join(self.original_root, self.download_name), self.root)
else:
print('Stop download.')
exit(-1)
### preprocess
add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
if self.meta_info['additional node files'] == 'None':
additional_node_files = []
else:
additional_node_files = self.meta_info['additional node files'].split(',')
if self.meta_info['additional edge files'] == 'None':
additional_edge_files = []
else:
additional_edge_files = self.meta_info['additional edge files'].split(',')
if self.binary:
self.graphs = read_binary_graph_raw(raw_dir, add_inverse_edge = add_inverse_edge)
else:
self.graphs = read_csv_graph_raw(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)
if self.task_type == 'subtoken prediction':
labels_joined = pd.read_csv(osp.join(raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
# need to split each element into subtokens
self.labels = [str(labels_joined[i][0]).split(' ') for i in range(len(labels_joined))]
else:
if self.binary:
self.labels = np.load(osp.join(raw_dir, 'graph-label.npz'))['graph_label']
else:
self.labels = pd.read_csv(osp.join(raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
print('Saving...')
torch.save({'graphs': self.graphs, 'labels': self.labels}, pre_processed_file_path, pickle_protocol=4)
def get_idx_split(self, split_type = None):
if split_type is None:
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
# short-cut if split_dict.pt exists
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]
valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]
test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]
return {'train': train_idx, 'valid': valid_idx, 'test': test_idx}
def __getitem__(self, idx):
'''Get datapoint with index'''
if isinstance(idx, (int, np.integer)):
return self.graphs[idx], self.labels[idx]
raise IndexError(
'Only integer is valid index (got {}).'.format(type(idx).__name__))
def __len__(self):
'''Length of the dataset
Returns
-------
int
Length of Dataset
'''
return len(self.graphs)
def __repr__(self): # pragma: no cover
return '{}({})'.format(self.__class__.__name__, len(self))
if __name__ == '__main__':
dataset = GraphPropPredDataset(name = 'ogbg-code')
# target_list = np.array([len(label) for label in dataset.labels])
# print(np.sum(target_list == 1)/ float(len(target_list)))
# print(np.sum(target_list == 2)/ float(len(target_list)))
# print(np.sum(target_list == 3)/ float(len(target_list)))
# from collections import Counter
# print(Counter(target_list))
print(dataset.num_classes)
split_index = dataset.get_idx_split()
print(split_index)
# print(dataset)
# print(dataset[2])
# print(split_index['train'])
# print(split_index['valid'])
# print(split_index['test'])
| mit |
macks22/fastFM | fastFM/tests/test_als.py | 1 | 4939 | # Author: Immanuel Bayer
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn import metrics
from fastFM import als
from numpy.testing import assert_almost_equal
from fastFM.datasets import make_user_item_regression
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_almost_equal
def get_test_problem(task='regression'):
X = sp.csc_matrix(np.array([[6, 1],
[2, 3],
[3, 0],
[6, 1],
[4, 5]]), dtype=np.float64)
y = np.array([298, 266, 29, 298, 848], dtype=np.float64)
V = np.array([[6, 0],
[5, 8]], dtype=np.float64)
w = np.array([9, 2], dtype=np.float64)
w0 = 2
if task == 'classification':
y_labels = np.ones_like(y)
y_labels[y < np.median(y)] = -1
y = y_labels
return w0, w, V, y, X
def get_small_data():
X = sp.csc_matrix(np.array([[1, 2],
[3, 4],
[5, 6]]), dtype=np.float64)
y = np.array([600, 2800, 10000], dtype=np.float64)
return X, y
def _test_fm_regression_only_w0():
X, y = get_small_data()
fm = als.FMRegression(n_iter=0, l2_reg_w=0, l2_reg_V=0, rank=0)
fm.ignore_w = True
fm.w0_ = 2
fm.fit(X, y, warm_start=True)
assert_almost_equal(fm.w0_, 2, 6)
fm = als.FMRegression(n_iter=1, l2_reg_w=0, l2_reg_V=0, rank=0)
fm.ignore_w = True
fm.w0_ = 2
fm.fit(X, y, warm_start=True)
assert_almost_equal(fm.w0_, 4466.6666666666661, 6)
def test_fm_regression():
w0, w, V, y, X = get_test_problem()
fm = als.FMRegression(n_iter=1000, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X, y)
y_pred = fm.predict(X)
assert_almost_equal(y_pred, y, 3)
# check different size
fm = als.FMRegression(n_iter=1000, l2_reg_w=0, l2_reg_V=0, rank=5)
X_big = sp.hstack([X,X])
fm.fit(X_big, y)
y_pred = fm.predict(X_big[:2,])
def test_fm_classification():
w0, w, V, y, X = get_test_problem(task='classification')
fm = als.FMClassification(n_iter=1000,
init_stdev=0.1, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X, y)
y_pred = fm.predict(X)
print y_pred
assert metrics.accuracy_score(y, y_pred) > 0.95
# check different size
fm.fit(X[:2,], y[:2])
def test_als_warm_start():
X, y, coef = make_user_item_regression(label_stdev=0)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
X_train = sp.csc_matrix(X_train)
X_test = sp.csc_matrix(X_test)
fm = als.FMRegression(n_iter=10, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X_train, y_train)
y_pred = fm.predict(X_test)
error_10_iter = mean_squared_error(y_pred, y_test)
fm = als.FMRegression(n_iter=5, l2_reg_w=0, l2_reg_V=0, rank=2)
fm.fit(X_train, y_train)
print fm.iter_count
y_pred = fm.predict(X_test)
error_5_iter = mean_squared_error(y_pred, y_test)
fm.fit(sp.csc_matrix(X_train), y_train, n_more_iter=5)
print fm.iter_count
y_pred = fm.predict(X_test)
error_5_iter_plus_5 = mean_squared_error(y_pred, y_test)
print error_5_iter, error_5_iter_plus_5, error_10_iter
assert error_10_iter == error_5_iter_plus_5
def test_warm_start_path():
X, y, coef = make_user_item_regression(label_stdev=.4)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
X_train = sp.csc_matrix(X_train)
X_test = sp.csc_matrix(X_test)
n_iter = 10
rank = 4
seed = 333
step_size = 1
l2_reg_w = 0
l2_reg_V = 0
fm = als.FMRegression(n_iter=0, l2_reg_w=l2_reg_w,
l2_reg_V=l2_reg_V, rank=rank, random_state=seed)
# initalize coefs
fm.fit(X_train, y_train)
rmse_train = []
rmse_test = []
for i in range(1, n_iter):
fm.fit(X_train, y_train, n_more_iter=step_size)
rmse_train.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
rmse_test.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
print '------- restart ----------'
values = np.arange(1, n_iter)
rmse_test_re = []
rmse_train_re = []
for i in values:
fm = als.FMRegression(n_iter=i, l2_reg_w=l2_reg_w,
l2_reg_V=l2_reg_V, rank=rank, random_state=seed)
fm.fit(X_train, y_train)
rmse_test_re.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
rmse_train_re.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
assert_almost_equal(rmse_train, rmse_train_re)
assert_almost_equal(rmse_test, rmse_test_re)
if __name__ == '__main__':
#test_fm_regression_only_w0()
test_als_warm_start()
| bsd-3-clause |
rtrwalker/geotecha | geotecha/consolidation/schiffmanandstein1970.py | 1 | 27514 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""
Schiffman and Stein 1970 "One-Dimensional consolidation of layered systems".
"""
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import geotecha.inputoutput.inputoutput as inputoutput
import math
import textwrap
import scipy.optimize
import geotecha.piecewise.piecewise_linear_1d as pwise
from geotecha.mathematics.root_finding import find_n_roots
import time
import geotecha.plotting.one_d
from geotecha.inputoutput.inputoutput import GenericInputFileArgParser
class SchiffmanAndStein1970(inputoutput.InputFileLoaderCheckerSaver):
"""One-dimensional consolidation of layered systems
Implements Schiffman and Stein (1970) [1]_.
Features:
- Vertical flow, multiple layers.
- Soil properties constant with time.
    - Drained or undrained boundary conditions. Also possible to have
      stiff high permeability impeding layers at top and bottom.
- Load is uniform with depth but varies piecewise-linear with time.
- Pore pressure vs depth at various times.
- Average pore pressure vs time. Average is over the entire soil layer.
- Settlement vs time. Settlement is over whole profile.
.. warning::
The 'Parameters' and 'Attributes' sections below require further
explanation. The parameters listed below are not used to explicitly
initialize the object. Rather they are defined in either a
multi-line string or a file-like object using python syntax.
It is the file object or string object that is used to initialize
the object. Each 'parameter' will be turned into an attribute that
can be accessed using conventional python dot notation, after the
object has been initialised. The attributes listed below are
calculated values (i.e. they could be interpreted as results) which
are accessible using dot notation after all calculations are
        complete. See the Examples section below for a sketch of this
        workflow.
Parameters
----------
z : list/array of float
Depth to calc pore pressure at.
t : list/array of float
Time values to calc time variation of parameters at.
tpor : list/array of float
Time values to calc pore pressure profiles at.
h : list/array of float
Layer thicknesses.
n : int, optional
Number of series terms to use. Default n=5.
kv : list/array of float
Layer vertical permeability divided by unit weight of water.
mv : list/array of float
Layer volume compressibility.
bctop, bcbot : [0, 1, 3]
Boundary condition. bctop=0 is free draining, bctop=1 is
impervious, bctop=3 is impeded by stiff layer of thickness htop
and permeability ktop.
htop, hbot : float, optional
Thickness of top and bottom impeding layer. Only used if bcbot==3 or
bctop==3.
ktop, kbot : float, optional
Top and bottom impeding layer vertical permeability divided by
unit weight of water. Only used if bcbot==3 or bctop==3.
surcharge_vs_time : PolyLine
Piecewise linear variation of surcharge with time
show_vert_eigs : True/False, optional
        If True a vertical eigenvalue plot will be made. This is useful
        to check if the correct eigenvalues have been found.
Default show_vert_eigs=False
plot_properties : dict of dict, optional
dictionary that overrides some of the plot properties.
Each member of `plot_properties` will correspond to one of the plots.
================== ============================================
plot_properties description
================== ============================================
por dict of prop to pass to pore pressure plot.
avp dict of prop to pass to average pore
pressure plot.
set dict of prop to pass to settlement plot.
================== ============================================
see geotecha.plotting.one_d.plot_vs_depth and
geotecha.plotting.one_d.plot_vs_time for options to specify in
each plot dict.
save_data_to_file : True/False, optional
If True data will be saved to file. Default save_data_to_file=False
save_figures_to_file : True/False
If True then figures will be saved to file.
Default save_figures_to_file=False
show_figures : True/False, optional
If True the after calculation figures will be shown on screen.
Default show_figures=False.
directory : string, optional
Path to directory where files should be stored.
Default directory=None which
will use the current working directory. Note if you keep getting
directory does not exist errors then try putting an r before the
string definition. i.e. directory = r'C:\\Users\\...'
overwrite : True/False, optional
If True then existing files will be overwritten.
Default overwrite=False.
prefix : string, optional
        Filename prefix for all output files. Default prefix='ss1970_'.
    create_directory : True/False, optional
If True a new sub-folder with name based on `prefix` and an
incremented number will contain the output
files. Default create_directory=True.
data_ext : string, optional
File extension for data files. Default data_ext='.csv'
input_ext : string, optional
File extension for original and parsed input files. default = ".py"
figure_ext : string, optional
File extension for figures. Can be any valid matplotlib option for
savefig. Default figure_ext=".eps". Others include 'pdf', 'png'.
title : str, optional
A title for the input file. This will appear at the top of data files.
Default title=None, i.e. no title.
author : str, optional
Author of analysis. Default='unknown'.
Attributes
----------
por : array of shape (len(z), len(tpor))
Pore pressure vs depth at various times. Only present if tpor defined.
avp : array of shape (1, len(t))
        Average pore pressure of profile at various times. Only present if t
defined.
set : array of shape (1, len(t))
Surface settlement at various times. Only present if t
defined.
See Also
--------
geotecha.piecewise.piecewise_linear_1d.PolyLine : how to specify loadings
References
----------
.. [1] Schiffman, R. L, and J. R Stein. 'One-Dimensional Consolidation
of Layered Systems'. Journal of the Soil Mechanics and
Foundations Division 96, no. 4 (1970): 1499-1504.
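    Examples
    --------
    Illustrative workflow only (the values below are arbitrary, and PolyLine
    is assumed to be available to the input parser, as in the package's
    example input files). The inputs are written as Python syntax in a
    multi-line string (or a file with the same content), and that string is
    used to initialise the object::

        reader = ("h = [1.0, 2.0]; kv = [1.0e-8, 5.0e-9]; mv = [2.0e-4, 1.0e-4]; "
                  "bctop = 0; bcbot = 1; "
                  "surcharge_vs_time = PolyLine([0.0, 100.0, 1.0e6], [0.0, 80.0, 80.0]); "
                  "z = [0.0, 1.0, 2.0, 3.0]; t = [1.0e2, 1.0e4, 1.0e6]")

        analysis = SchiffmanAndStein1970(reader)
        analysis.make_all()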
"""
def _setup(self):
self._attribute_defaults = {'n': 5,
'prefix': 'ss1970_',
'show_vert_eigs': False}
self._attributes = 'z t tpor n h kv mv bctop bcbot htop ktop hbot kbot surcharge_vs_time show_vert_eigs'.split()
self._attributes_that_should_have_same_len_pairs = [
'h kv'.split(),
'kv mv'.split(),
'h mv'.split()] #pairs that should have the same length
self._attributes_that_should_be_lists= []
self._attributes_that_should_have_same_x_limits = []
self.z = None
self.t = None
self.tpor = None
self.n = self._attribute_defaults.get('n', None)
self.prefix = self._attribute_defaults.get('prefix', None)
self.show_vert_eigs = self._attribute_defaults.get('show_vert_eigs', None)
self.h = None
self.kv = None
self.mv = None
self.bctop = None
self.bcbot = None
self.htop = None
self.ktop = None
self.hbot = None
self.kbot = None
self.surcharge_vs_time = None
self._zero_or_all = [
'h kv mv'.split(),
'htop ktop'.split(),
'hbot kbot'.split(),
'z t'.split()]
self._at_least_one = [['surcharge_vs_time']]
self._one_implies_others = []
def _calc_derived_properties(self):
"""Calculate properties/ratios derived from input"""
self.check_input_attributes()
self.t = np.asarray(self.t)
self.z = np.asarray(self.z)
self.kv = np.asarray(self.kv)
self.mv = np.asarray(self.mv)
self.h = np.asarray(self.h)
self.nlayers = len(self.kv)
self.zlayer = np.cumsum(self.h)
# print (self.zlayer)
# self.zlayer = np.zeros(nlayers +1, dtype=float)
# self.zlayer[1:] = np.cumsum(self.h)
self.cv = self.kv / self.mv
if self.bctop == 0:
self.atop = 0
self.btop = -1
elif self.bctop == 1:
self.atop = 1
self.btop = 0
elif self.bctop == 3:
            self.atop = self.h[0]
            self.btop = self.ktop * self.h[0] / (self.kv[0] * self.htop)
        else:
            raise ValueError('bctop must be 0, 1, or 3. You have bctop = {}'.format(self.bctop))
if self.bcbot == 0:
self.abot = 0
self.bbot = 1
elif self.bcbot == 1:
self.abot = 1
self.bbot = 0
elif self.bcbot == 3:
            self.abot = self.h[-1]
            self.bbot = self.kbot * self.h[-1] / (self.kv[-1] * self.hbot)
        else:
            raise ValueError('bcbot must be 0, 1, or 3. You have bcbot = {}'.format(self.bcbot))
self.BC = np.zeros((2 * self.nlayers, 2* self.nlayers), dtype=float)
def produce_plots(self):
"""Produce plots of analysis"""
# geotecha.plotting.one_d.pleasing_defaults()
matplotlib.rcParams.update({'font.size': 11})
matplotlib.rcParams.update({'font.family': 'serif'})
self._figures=[]
#por
if not self.tpor is None:
f=self._plot_por()
title = 'fig_por'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
if not self.t is None:
f=self._plot_avp()
title = 'fig_avp'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
f=self._plot_set()
title = 'fig_set'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
if self.show_vert_eigs:
f = self.plot_characteristic_curve_and_roots(1000)
title = 'vertical characteristic curve and eigs'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
def _plot_por(self):
"""plot depth vs pore pressure for various times
"""
t = self.tpor
line_labels = ['{:.3g}'.format(v) for v in t]
por_prop = self.plot_properties.pop('por', dict())
if not 'xlabel' in por_prop:
por_prop['xlabel'] = 'Pore pressure'
#to do
fig_por = geotecha.plotting.one_d.plot_vs_depth(self.por, self.z,
line_labels=line_labels,
prop_dict=por_prop)
return fig_por
def _plot_avp(self):
"""plot average pore pressure of profile"""
t = self.t
line_labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
avp_prop = self.plot_properties.pop('avp', dict())
if not 'ylabel' in avp_prop:
avp_prop['ylabel'] = 'Average pore pressure'
fig_avp = geotecha.plotting.one_d.plot_vs_time(t, self.avp.T,
line_labels=line_labels,
prop_dict=avp_prop)
return fig_avp
def _plot_set(self):
"""plot surface settlement"""
t = self.t
line_labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
set_prop = self.plot_properties.pop('set', dict())
if not 'ylabel' in set_prop:
set_prop['ylabel'] = 'surface settlement'
fig_set = geotecha.plotting.one_d.plot_vs_time(t, self.set.T,
line_labels=line_labels,
prop_dict=set_prop)
fig_set.gca().invert_yaxis()
return fig_set
def make_all(self):
"""make_output, produce files and plots"""
# self.check_input_attributes()
self.make_output()
if getattr(self, 'save_data_to_file', False):
self._save_data()
if (getattr(self, 'save_figures_to_file', False) or
getattr(self, 'show_figures', False)):
self.produce_plots()
if getattr(self, 'save_figures_to_file', False):
self._save_figures()
if getattr(self, 'show_figures', False):
plt.show()
def make_output(self):
"""Perform all calculations"""
self._calc_derived_properties()
self._find_beta()
self._calc_Bm_and_Cm()
self._calc_Am()
header1 = "program: schiffmanandstein1970; geotecha version: {}; author: {}; date: {}\n".format(self.version, self.author, time.strftime('%Y/%m/%d %H:%M:%S'))
if not self.title is None:
header1 += "{}\n".format(self.title)
self._grid_data_dicts = []
if not self.tpor is None:
self.calc_por()
labels = ['{:.3g}'.format(v) for v in self.z]
d = {'name': '_data_por',
'data': self.por.T,
'row_labels': self.tpor,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'Pore pressure at depth'}
self._grid_data_dicts.append(d)
if not self.t is None:
self.calc_settle_and_avp()
labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
d = {'name': '_data_avp',
'data': self.avp.T,
'row_labels': self.t,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'Average pore pressure between depths'}
self._grid_data_dicts.append(d)
labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
d = {'name': '_data_set',
'data': self.avp.T,
'row_labels': self.t,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'settlement between depths'}
self._grid_data_dicts.append(d)
if self._debug:
print ('beta')
print (self._beta)
print('Bm')
print(self._Bm)
print('Cm')
print(self._Cm)
print('Am')
print(self._Am)
return
def _find_beta(self):
"""find the eigenvalues of the solution
"""
H = self.zlayer[-1]
x0 = 0.1 / H**2
self._beta0 = np.empty(self.n, dtype=float)
self._beta0[:] = find_n_roots(self._characteristic_eqn, n=self.n,
x0=x0, dx=x0, p=1.01)
return
def _characteristic_eqn(self, beta0):
"""function for characteristic equation
Roots are the eigenvalues of problem beta 1
"""
self._make_BC(beta0)
return np.linalg.det(self.BC)
def _make_BC(self, beta0):
"""make boundary condition matrix
        for use in the characteristic equation and in determining coefficients B, C
"""
beta = np.zeros_like(self.h, dtype=float)
beta[0] = beta0
for i in range(1, self.nlayers):
beta[i] = np.sqrt(self.cv[i-1] / self.cv[i] * beta[i-1]**2)
alpha = self.kv[:-1] / self.kv[1:]
self.BC[0, 0] = self.btop * (-1)
self.BC[0, 1] = self.atop * beta[0]
self.BC[-1, -2] = (self.bbot * math.cos(beta[-1] * self.zlayer[-1]) -
self.abot * beta[-1] * math.sin(beta[-1] * self.zlayer[-1]))
self.BC[-1, -1] = (self.bbot * math.sin(beta[-1] * self.zlayer[-1]) +
self.abot * beta[-1] * math.cos(beta[-1] * self.zlayer[-1]))
for i in range(self.nlayers - 1):
#1st equation
#TODO: row is wrong
row = 2 * i + 1
            self.BC[row, 2 * i] = math.cos(beta[i] * self.zlayer[i])  # Bi coeff
            self.BC[row, 2 * i + 1] = math.sin(beta[i] * self.zlayer[i])  # Ci coeff
            self.BC[row, 2 * i + 2] = -math.cos(beta[i+1] * self.zlayer[i])  # Bi+1 coeff
            self.BC[row, 2 * i + 3] = -math.sin(beta[i+1] * self.zlayer[i])  # Ci+1 coeff
            # 2nd equation
            row += 1
            self.BC[row, 2 * i] = - alpha[i] * beta[i] * math.sin(beta[i] * self.zlayer[i])  # Bi coeff
            self.BC[row, 2 * i + 1] = alpha[i] * beta[i] * math.cos(beta[i] * self.zlayer[i])  # Ci coeff
            self.BC[row, 2 * i + 2] = beta[i+1] * math.sin(beta[i+1] * self.zlayer[i])  # Bi+1 coeff
            self.BC[row, 2 * i + 3] = - beta[i+1] * math.cos(beta[i+1] * self.zlayer[i])  # Ci+1 coeff
return beta
def plot_characteristic_curve_and_roots(self, npts=400):
"""Plot the characteristic curve for the problem showing roots
Run after an analysis to check if roots are reasonable.
Parameters
        ----------
npts : int
Number of points to plot. Default npts=400.
Returns
-------
fig : matplotlib.Figure
A plot.
"""
x = np.linspace(0, self._beta0[-1] + (self._beta0[-1]-self._beta0[-2])/8, npts)
y = np.zeros_like(x)
for i in range(len(x)):
y[i]=self._characteristic_eqn(x[i])
# plt.gcf().clear()
fig = plt.figure(figsize=(30,5))
ax = fig.add_subplot('111')
ax.plot(x ,y,'-', marker='.', markersize=3)
ax.plot(self._beta0, np.zeros_like(self._beta0),'ro', markersize=6)
ax.set_ylabel('det(A)')
ax.set_xlabel('beta0')
# plt.gca().set_ylim(-0.1,0.1)
ax.grid()
fig.tight_layout()
return fig
def _calc_Bm_and_Cm(self):
"""calculate the coefficinets Bm and Cm"""
self._Bm = np.zeros((self.n, self.nlayers), dtype=float)
self._Cm = np.zeros((self.n, self.nlayers), dtype=float)
self._beta = np.zeros((self.n, self.nlayers), dtype=float)
self._Cm[:, -1] = 1.0
for i, beta in enumerate(self._beta0):
self._beta[i, :] = self._make_BC(beta)
self.BC[np.abs(self.BC)<1e-10]=0
if self._debug and i==0:
print('BC for beta0')
print(self.BC)
b = -self.BC[:-1, -1]
a = self.BC[:-1, :-1]
x = np.linalg.solve(a, b)
self._Bm[i, :] = x[::2]
self._Cm[i, :-1] = x[1::2]
def _Tm_integrations(self):
"""symbolic integration of the Tm coefficient
just used as a step to derive some code"""
import sympy
cv, beta, t, tau, t1, t2, sig1, sig2 = sympy.var('cv, beta, t, tau, t1, t2, sig1, sig2')
q = sig1 + (sig2 - sig1) / (t2 - t1) * tau
f = sympy.diff(q, tau) * sympy.exp(-cv * beta**2 * (t - tau))
#uniform laod
#within ramp
Tm = sympy.integrate(f, (tau, t1, t))
print('Tm within a ramp load')
print(Tm)
# after ramp
Tm = sympy.integrate(f, (tau, t1, t2))
print('Tm after a ramp load')
print(Tm)
return
def _avp_integrations(self):
"""symbolic integration of for avp average pore pressure
just used as a step to derive some code"""
import sympy
z, mv, Bm, Cm, beta, f, Zm, z1, z2 = sympy.var('z, mv, Bm, Cm, beta, f, Zm, z1, z2')
Zm = Bm * sympy.cos(beta * z) + Cm * sympy.sin(beta * z)
f = sympy.integrate(Zm, (z, z1, z2))
print('summation term for avp')
print(f)
return
def _Am_integrations(self):
"""symbolic integration of for Am coefficient
just used as a step to derive some code"""
import sympy
z, mv, Bm, Cm, beta, f, Zm, z1, z2 = sympy.var('z, mv, Bm, Cm, beta, f, Zm, z1, z2')
Zm = Bm * sympy.cos(beta * z) + Cm * sympy.sin(beta * z)
#uniform initial pore pressure
numerator = mv * sympy.integrate(Zm, (z, z1, z2))
denominator = mv * (sympy.integrate(Zm**2, (z, z1, z2)))
# Am = numerator / denominator
print('Am numerator - uniform initial pore pressure')
print(numerator)
print('Am denominator - uniform initial pore pressure')
print(denominator)
# print('**')
# print(Am)
def _calc_Am(self):
"""make the Am coefficients"""
cos = math.cos
sin = math.sin
self._Am = np.zeros(self.n, dtype=float)
_z2 = self.zlayer
_z1 = self.zlayer - self.h
for m in range(self.n):
numer = 0
denom = 0
for i in range(self.nlayers):
z1=_z1[i]
z2=_z2[i]
mv = self.mv[i]
Bm = self._Bm[m, i]
Cm = self._Cm[m, i]
beta = self._beta[m, i]
numer += mv*(-Bm*sin(beta*z1)/beta + Bm*sin(beta*z2)/beta +
Cm*cos(beta*z1)/beta - Cm*cos(beta*z2)/beta)
denom += mv*(-Bm**2*z1*sin(beta*z1)**2/2 -
Bm**2*z1*cos(beta*z1)**2/2 + Bm**2*z2*sin(beta*z2)**2/2 +
Bm**2*z2*cos(beta*z2)**2/2 -
Bm**2*sin(beta*z1)*cos(beta*z1)/(2*beta) +
Bm**2*sin(beta*z2)*cos(beta*z2)/(2*beta) +
Bm*Cm*cos(beta*z1)**2/beta - Bm*Cm*cos(beta*z2)**2/beta -
Cm**2*z1*sin(beta*z1)**2/2 - Cm**2*z1*cos(beta*z1)**2/2 +
Cm**2*z2*sin(beta*z2)**2/2 + Cm**2*z2*cos(beta*z2)**2/2 +
Cm**2*sin(beta*z1)*cos(beta*z1)/(2*beta) -
Cm**2*sin(beta*z2)*cos(beta*z2)/(2*beta))
Am = numer / denom
self._Am[m] = Am
return
def _calc_Tm(self, cv, beta, t):
"""calculate the Tm expression at a given time
Parameters
----------
cv : float
coefficient of vertical consolidation for layer
beta : float
eigenvalue for layer
t : float
time value
Returns
-------
Tm: float
            time dependent function
"""
loadmag = self.surcharge_vs_time.y
loadtim = self.surcharge_vs_time.x
(ramps_less_than_t, constants_less_than_t, steps_less_than_t,
ramps_containing_t, constants_containing_t) = pwise.segment_containing_also_segments_less_than_xi(loadtim, loadmag, t, steps_or_equal_to = True)
exp = math.exp
Tm=0
i=0 #only one time value
for k in steps_less_than_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
Tm += (sig2-sig1)*exp(-cv * beta**2 * (t-loadtim[k]))
for k in ramps_containing_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
t1 = loadtim[k]
t2 = loadtim[k+1]
# Tm += (-sig1 + sig2)/(beta**2*cv*(-t1 + t2)) - (-sig1 + sig2)*exp(-beta**2*cv*t)*exp(beta**2*cv*t1)/(beta**2*cv*(-t1 + t2))
Tm += (-sig1 + sig2)/(beta**2*cv*(-t1 + t2)) - (-sig1 + sig2)*exp(-beta**2*cv*(t-t1))/(beta**2*cv*(-t1 + t2))
for k in ramps_less_than_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
t1 = loadtim[k]
t2 = loadtim[k+1]
# Tm += -(-sig1 + sig2)*exp(-beta**2*cv*t)*exp(beta**2*cv*t1)/(beta**2*cv*(-t1 + t2)) + (-sig1 + sig2)*exp(-beta**2*cv*t)*exp(beta**2*cv*t2)/(beta**2*cv*(-t1 + t2))
Tm += -(-sig1 + sig2)*exp(-beta**2*cv*(t-t1))/(beta**2*cv*(-t1 + t2)) + (-sig1 + sig2)*exp(-beta**2*cv*(t-t2))/(beta**2*cv*(-t1 + t2))
return Tm
def calc_settle_and_avp(self):
"""Calculate settlement and average pore pressure at time"""
self.set = np.zeros(len(self.t), dtype=float)
self.avp = np.zeros(len(self.t), dtype=float)
_z2 = self.zlayer
_z1 = self.zlayer - self.h
# print(_z1,_z2)
sin = math.sin
cos = math.cos
for j, t in enumerate(self.t):
settle=0
avp = 0
q = pwise.pinterp_x_y(self.surcharge_vs_time, t)[0]
settle = np.sum(self.mv * self.h) * q
for layer in range(self.nlayers):
for m in range(self.n):
z1=_z1[layer]
z2=_z2[layer]
Am = self._Am[m]
mv = self.mv[layer]
Bm = self._Bm[m, layer]
Cm = self._Cm[m, layer]
beta = self._beta[m, layer]
cv = self.cv[layer]
Zm_integral = -Bm*sin(beta * z1)/beta + Bm * sin(beta * z2)/beta + Cm * cos(beta*z1)/beta - Cm*cos(beta*z2)/beta
Tm = self._calc_Tm(cv, beta, t)
avp += Zm_integral * Tm * Am
settle -= mv * Zm_integral * Tm * Am
self.set[j] = settle
self.avp[j] = avp / self.zlayer[-1]
return
def calc_por(self):
"""Calculate pore pressure at depth and time"""
if self.tpor is None:
            self.tpor = self.t
self.por = np.zeros((len(self.z), len(self.tpor)), dtype=float)
z_in_layer = np.searchsorted(self.zlayer, self.z)
for j, t in enumerate(self.tpor):
for m in range(self.n):
for k, z in enumerate(self.z):
layer = z_in_layer[k]
Am = self._Am[m]
Bm = self._Bm[m, layer]
Cm = self._Cm[m, layer]
beta = self._beta[m, layer]
cv = self.cv[layer]
Zm = Bm * math.cos(beta * z) + Cm * math.sin(beta * z)
# Tm = math.exp(-cv * beta**2 * t)
Tm = self._calc_Tm(cv, beta, t)
self.por[k, j] += Am * Zm * Tm
def main():
"""Run schiffmanandstein1970 as script"""
a = GenericInputFileArgParser(obj=SchiffmanAndStein1970,
methods=[('make_all', [], {})],
pass_open_file=True)
a.main()
if __name__ == '__main__':
# import nose
# nose.runmodule(argv=['nose', '--verbosity=3', '--with-doctest'])
## nose.runmodule(argv=['nose', '--verbosity=3'])
main()
| gpl-3.0 |
alubbock/pysb-legacy | pysb/examples/paper_figures/fig6.py | 4 | 9163 | """Produce contact map for Figure 5D from the PySB publication"""
import pysb.integrate
import pysb.util
import numpy as np
import scipy.optimize
import scipy.interpolate
import matplotlib.pyplot as plt
import os
import sys
import inspect
from earm.lopez_embedded import model
# List of model observables and corresponding data file columns for
# point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
var_names = ['nrm_var_ICRP', 'nrm_var_ECRP']
# Load experimental data file
data_path = os.path.join(os.path.dirname(__file__), 'fig6_data.csv')
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)
# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_data = np.array([9810.0, 180.0, 1.0])
momp_var = np.array([7245000.0, 3600.0, 1e-9])
# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
(ntimes-1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)
# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space
bounds_radius = 2
def objective_func(x, rate_mask, lb, ub):
caller_frame, _, _, caller_func, _, _ = inspect.stack()[1]
if caller_func in {'anneal', '_minimize_anneal'}:
caller_locals = caller_frame.f_locals
if caller_locals['n'] == 1:
print caller_locals['best_state'].cost, caller_locals['current_state'].cost
# Apply hard bounds
if np.any((x < lb) | (x > ub)):
print "bounds-check failed"
return np.inf
# Simulate model with rates taken from x (which is log transformed)
param_values = np.array([p.value for p in model.parameters])
param_values[rate_mask] = 10 ** x
solver.run(param_values)
# Calculate error for point-by-point trajectory comparisons
e1 = 0
for obs_name, data_name, var_name in zip(obs_names, data_names, var_names):
# Get model observable trajectory (this is the slice expression
# mentioned above in the comment for tspan)
ysim = solver.yobs[obs_name][::tmul]
# Normalize it to 0-1
ysim_norm = ysim / np.nanmax(ysim)
# Get experimental measurement and variance
ydata = exp_data[data_name]
yvar = exp_data[var_name]
# Compute error between simulation and experiment (chi-squared)
e1 += np.sum((ydata - ysim_norm) ** 2 / (2 * yvar)) / len(ydata)
# Calculate error for Td, Ts, and final value for IMS-RP reporter
# =====
# Normalize trajectory
ysim_momp = solver.yobs[momp_obs]
ysim_momp_norm = ysim_momp / np.nanmax(ysim_momp)
# Build a spline to interpolate it
st, sc, sk = scipy.interpolate.splrep(solver.tspan, ysim_momp_norm)
# Use root-finding to find the point where trajectory reaches 10% and 90%
t10 = scipy.interpolate.sproot((st, sc-0.10, sk))[0]
t90 = scipy.interpolate.sproot((st, sc-0.90, sk))[0]
# Calculate Td as the mean of these times
td = (t10 + t90) / 2
# Calculate Ts as their difference
ts = t90 - t10
# Get yfinal, the last element from the trajectory
yfinal = ysim_momp_norm[-1]
# Build a vector of the 3 variables to fit
momp_sim = [td, ts, yfinal]
# Perform chi-squared calculation against mean and variance vectors
e2 = np.sum((momp_data - momp_sim) ** 2 / (2 * momp_var)) / 3
# Calculate error for final cPARP value (ensure all PARP is cleaved)
cparp_final = model.parameters['PARP_0'].value
cparp_final_var = .01
cparp_final_sim = solver.yobs['cPARP'][-1]
e3 = (cparp_final - cparp_final_sim) ** 2 / (2 * cparp_final_var)
error = e1 + e2 + e3
return error
def estimate(start_values=None):
"""Estimate parameter values by fitting to data.
Parameters
==========
    start_values : numpy array of floats, optional
Starting parameter values. Taken from model's nominal parameter values
if not specified.
Returns
=======
numpy array of floats, containing fitted parameter values.
"""
# Set starting position to nominal parameter values if not specified
if start_values is None:
start_values = nominal_values
else:
assert start_values.shape == nominal_values.shape
# Log-transform the starting position
x0 = np.log10(start_values[rate_mask])
# Displacement size for annealing moves
dx = .02
# The default 'fast' annealing schedule uses the 'lower' and 'upper'
# arguments in a somewhat counterintuitive way. See
# http://projects.scipy.org/scipy/ticket/1126 for more information. This is
# how to get the search to start at x0 and use a displacement on the order
# of dx (note that this will affect the T0 estimation which *does* expect
# lower and upper to be the absolute expected bounds on x).
lower = x0 - dx / 2
upper = x0 + dx / 2
# Log-transform the rate parameter values
xnominal = np.log10(nominal_values[rate_mask])
# Hard lower and upper bounds on x
lb = xnominal - bounds_radius
ub = xnominal + bounds_radius
# Perform the annealing
args = [rate_mask, lb, ub]
(xmin, Jmin, Tfinal, feval, iters, accept, retval) = \
scipy.optimize.anneal(objective_func, x0, full_output=True,
maxiter=4000, quench=0.5,
lower=lower, upper=upper,
args=args)
# Construct vector with resulting parameter values (un-log-transformed)
params_estimated = start_values.copy()
params_estimated[rate_mask] = 10 ** xmin
# Display annealing results
for v in ('xmin', 'Jmin', 'Tfinal', 'feval', 'iters', 'accept', 'retval'):
print "%s: %s" % (v, locals()[v])
return params_estimated
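# Editor's note: scipy.optimize.anneal used above was removed from SciPy in
# version 0.16. The sketch below is not part of the original script (the name
# is hypothetical and it is never called here); it shows roughly how the same
# fit could be set up with scipy.optimize.dual_annealing (SciPy >= 1.2), which
# takes the hard bounds directly instead of the lower/upper displacement trick.
def estimate_dual_annealing(start_values=None, maxiter=1000):
    if start_values is None:
        start_values = nominal_values
    x0 = np.log10(start_values[rate_mask])
    xnominal = np.log10(nominal_values[rate_mask])
    lb = xnominal - bounds_radius
    ub = xnominal + bounds_radius
    result = scipy.optimize.dual_annealing(objective_func,
                                           bounds=list(zip(lb, ub)),
                                           args=(rate_mask, lb, ub),
                                           x0=x0, maxiter=maxiter)
    params_estimated = start_values.copy()
    params_estimated[rate_mask] = 10 ** result.x
    return params_estimated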
def display(params_estimated):
# Simulate model with nominal parameters and construct a matrix of the
# trajectories of the observables of interest, normalized to 0-1.
solver.run()
obs_names_disp = ['mBid', 'aSmac', 'cPARP']
obs_totals = [model.parameters[n].value for n in ('Bid_0', 'Smac_0', 'PARP_0')]
sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_obs_norm = (sim_obs / obs_totals).T
# Do the same with the estimated parameters
solver.run(params_estimated)
sim_est_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_est_obs_norm = (sim_est_obs / obs_totals).T
# Plot data with simulation trajectories both before and after fitting
color_data = '#C0C0C0'
color_orig = '#FAAA6A'
color_est = '#83C98E'
plt.subplot(311)
plt.errorbar(exp_data['Time'], exp_data['norm_ICRP'],
yerr=exp_data['nrm_var_ICRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[0], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[0], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved IC-RP/Bid', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(312)
plt.vlines(momp_data[0], -0.2, 1.2, color=color_data, linewidth=2)
plt.plot(solver.tspan, sim_obs_norm[1], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[1], color_est, linewidth=2)
plt.ylabel('Td / Fraction of\nreleased Smac', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(313)
plt.errorbar(exp_data['Time'], exp_data['norm_ECRP'],
yerr=exp_data['nrm_var_ECRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[2], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[2], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved EC-RP/PARP', multialignment='center')
plt.xlabel('Time (s)')
plt.axis([0, 20000, -0.2, 1.2])
plt.show()
if __name__ == '__main__':
params_estimated = None
try:
earm_path = sys.modules['earm'].__path__[0]
fit_file = os.path.join(earm_path, '..', 'EARM_2_0_M1a_fitted_params.txt')
params_estimated = np.genfromtxt(fit_file)[:,1].copy()
except IOError:
pass
if params_estimated is None:
np.random.seed(1)
params_estimated = estimate()
display(params_estimated)
| bsd-2-clause |
rahulmkumar/ZeroSim | research/analysis.py | 1 | 9024 | import pandas as pd
import math
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import copy
import numpy as np
'''
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
import csv
'''
#import sys
from datetime import datetime, date
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
'''
def maxloss(date, window_size, df_equity):
for date_idx in range(df_equity.index):
if df_equity.index[date_idx+window_size] <= df_equity.index[-1]:
'''
# Return a data frame of daily equity changes
def portvalchanges(df_equity):
df_port_val_changes = copy.deepcopy(df_equity)
df_port_val_changes = df_port_val_changes * 0
for date_idx in range(0,len(df_equity.index)):
if df_equity.index[date_idx] > df_equity.index[0]:
df_port_val_changes[0].ix[df_equity.index[date_idx]] = df_equity[0].ix[df_equity.index[date_idx]]-df_equity[0].ix[df_equity.index[date_idx-1]]
return df_port_val_changes
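# Editor's note: illustrative alternative (not part of the original module,
# name hypothetical, not used elsewhere). The loop above is a first difference
# with the first row left at zero, which pandas can express directly:
def portvalchanges_vectorized(df_equity):
    return df_equity.diff().fillna(0)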
def maxdrawdown(df_equity):
df_rollsum = copy.deepcopy(df_equity)
df_rollsum = df_rollsum * 0
#windows = [2,4,8,16,32]
windows = np.arange(2,51)
columns =['rollsum']
index = windows
df_rsum = pd.DataFrame(index=index,columns=columns)
df_rsum = df_rsum.fillna(0)
for window_size in windows:
df_rollsum[0] = pd.rolling_sum(df_equity[0],window_size)
df_rsum['rollsum'].ix[window_size] = df_rollsum[0].min(axis=0)
#df_equity.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_equity.csv')
df_rsum.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_rsum.csv')
df_rollsum.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_rollsum.csv')
return df_rsum.min(axis=0)
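# Editor's note: illustrative sketch (not part of the original module, name
# hypothetical, not used elsewhere). The function above reports the worst
# rolling *sum* of equity changes over 2-50 day windows; a conventional
# peak-to-trough maximum drawdown on an equity-value series would instead be:
def max_drawdown_peak_to_trough(equity_series):
    running_peak = equity_series.cummax()
    return (equity_series - running_peak).min()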
def plot_stock(quotes):
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
#ax.xaxis.set_major_locator(mondays)
#ax.xaxis.set_minor_locator(alldays)
#ax.xaxis.set_major_formatter(weekFormatter)
#ax.xaxis.set_minor_formatter(dayFormatter)
#plot_day_summary(ax, quotes, ticksize=3)
candlestick(ax, quotes, width=0.6)
ax.xaxis_date()
ax.autoscale_view()
plt.setp( plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.savefig('stock.pdf', format='pdf')
#plt.show()
def plots(index,series1, series2, series3, series4, file_name):
path = './plots/'+file_name+'_'+str(date.today())+'.pdf'
#pp = PdfPages('./plots/plots.pdf')
pp = PdfPages(path)
tot_symbols = len(series1.columns)
fig = plt.figure()
d = pp.infodict()
d['Title'] = 'Watchlist Chart Book'
d['Author'] = u'Rahul Kumar'
d['Subject'] = 'Watchlist Chart Book'
d['Keywords'] = 'Watchlist Charts'
#d['CreationDate'] = dt.datetime(2009, 11, 13)
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
for subplot in range(1,tot_symbols+1):
#print series1.columns[subplot-1]
#ax = fig.add_subplot(tot_symbols,1,subplot)
        plt.plot(index, series1[series1.columns[subplot-1]])
        plt.plot(index, series2[series2.columns[subplot-1]])
        plt.plot(index, series3[series3.columns[subplot-1]])
        plt.plot(index, series4[series4.columns[subplot-1]])
#plt.axhline(y=0, color='r')
plt.legend([series1.columns[subplot-1]], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel(series1.columns[subplot-1],size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig(pp, format='pdf')
plt.close()
pp.close()
def plot(index,series1, series2, series3, series4):
#fig = plt.figure()
plt.clf()
    plt.plot(index, series1)
    plt.plot(index, series2)
    plt.plot(index, series3)
    plt.plot(index, series4)
#plt.axhline(y=0, color='r')
plt.legend(['Portfolio', 'SPX '], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel('Date',size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig('channel.pdf', format='pdf')
def analyze(analyzefile):
print 'Inside Analyze'
file_path = 'C:\\Users\\owner\\Documents\\Software\\Python\\Quant\\Examples\\ZeroSum Strategy Suite\\'
#analyze_file = sys.argv[1]
#analyze_file = 'values.csv'
analyze_file = analyzefile
input_file = file_path+analyze_file
port_value = pd.read_csv(input_file, sep=',',index_col = 0, header=0,names=['PortVal'])
port_daily_ret = pd.DataFrame(range(len(port_value)),index=port_value.index, dtype='float')
startdate = datetime.strptime(port_value.index[0],'%Y-%m-%d %H:%M:%S')
enddate = datetime.strptime(port_value.index[len(port_value)-1],'%Y-%m-%d %H:%M:%S')
#benchmark = sys.argv[2]
benchmark = ['$SPX']
#benchmark = ['SPY']
#benchmark = bench
#d_data = data
# Start and End date of the charts
dt_start = dt.datetime(startdate.year, startdate.month, startdate.day)
    dt_end = dt.datetime(enddate.year, enddate.month, enddate.day) + dt.timedelta(days=1)
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Creating an object of the dataaccess class with Yahoo as the source.
#c_dataobj = da.DataAccess('Yahoo',verbose=True,cachestalltime = 0)
c_dataobj = da.DataAccess('Yahoo')
# Keys to be read from the data, it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, benchmark, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Filling the data for NAN
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_close = d_data['close']
#df_close = benchdata
bench_daily_ret = pd.DataFrame(range(len(df_close)),index=df_close.index, dtype='float')
bench_val = pd.DataFrame(range(len(port_value)),index=port_value.index)
bench_init_investment = port_value['PortVal'].ix[0]
bench_val[0].ix[0] = bench_init_investment
# Portfolio Daily Returns
for row_idx in range(0,len(ldt_timestamps)):
#Start calculating daily return on day 2
if row_idx > 0:
port_daily_ret[0].ix[row_idx] = (float(port_value['PortVal'].ix[row_idx])/float(port_value['PortVal'].ix[row_idx-1]))-1
# Benchmark Daily Returns
for row_idx in range(0,len(ldt_timestamps)):
#Start calculating daily return on day 2
if row_idx > 0:
bench_daily_ret[0].ix[row_idx] = (float(df_close[benchmark].ix[row_idx])/float(df_close[benchmark].ix[row_idx-1]))-1
#Bench Value
for row_idx in range(1,len(ldt_timestamps)):
bench_val[0].ix[row_idx] = bench_val[0].ix[row_idx-1] * (1+bench_daily_ret[0].ix[row_idx])
avg_port_daily_ret = port_daily_ret.mean(axis=0)
avg_bench_daily_ret = bench_daily_ret.mean(axis=0)
port_vol = port_daily_ret.std(axis=0)
bench_vol = bench_daily_ret.std(axis=0)
port_sharpe = math.sqrt(252)*(avg_port_daily_ret/port_vol)
bench_sharpe = math.sqrt(252)*(avg_bench_daily_ret/bench_vol)
port_cum_ret = float(port_value['PortVal'].ix[len(ldt_timestamps)-1])/float(port_value['PortVal'].ix[0])
bench_cum_ret = df_close[benchmark].ix[len(ldt_timestamps)-1]/df_close[benchmark].ix[0]
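    # Editor's note (added comment): the two return loops above compute a
    # percentage change; assuming the same inputs, a vectorised equivalent
    # would be roughly
    #     port_value['PortVal'].astype(float).pct_change().fillna(0)
    #     df_close[benchmark].astype(float).pct_change().fillna(0)
    # (names and handling of the first row here are illustrative only).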
    # Plot the portfolio value against the benchmark value
plt.clf()
    plt.plot(ldt_timestamps[0:], port_value['PortVal'])  # portfolio value
    plt.plot(ldt_timestamps[0:], bench_val[0])  # benchmark value
#plt.axhline(y=0, color='r')
plt.legend(['Portfolio', 'SPX '], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel('Date',size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig('rets.pdf', format='pdf')
print 'Sharpe ratio of fund:'+str(port_sharpe)
print 'Sharpe ratio of benchmark:'+str(bench_sharpe)
print 'Total Return of fund:'+str(port_cum_ret)
print 'Total Return of benchmark:'+str(bench_cum_ret)
print 'Standard Deviation of fund:'+str(port_vol)
print 'Standard Deviation of benchmark:'+str(bench_vol)
print 'Average Daily Return of fund:'+str(avg_port_daily_ret)
print 'Average Daily Return of benchmark:'+str(avg_bench_daily_ret)
| mit |
trungnt13/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
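# Editor's note (illustrative addition, not in the original example): compare
# the recovered support with the true support; `idx` and `idx_r` are the true
# and recovered non-zero indices defined above.
print("Correctly recovered atoms (noise-free): %d of %d"
      % (len(np.intersect1d(idx, idx_r)), n_nonzero_coefs))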
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
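# Editor's note (illustrative, not in the original example): the number of
# non-zero coefficients selected by cross-validation is exposed as
# omp_cv.n_nonzero_coefs_ and can be compared with the true n_nonzero_coefs:
# print("CV selected %d non-zero coefficients" % omp_cv.n_nonzero_coefs_)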
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
lewismc/image_space | flann_index/image_match.py | 12 | 4269 | # import the necessary packages
from optparse import OptionParser
from scipy.spatial import distance as dist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import glob
import cv2
import sys
import pickle
###########################
def image_match_histogram( all_files, options ):
histograms = {}
image_files = []
# loop over all images
for (i, fname) in enumerate(all_files):
if options.ipath:
path_fname = options.ipath + '/' + fname
else:
path_fname = fname
# read in image
image = cv2.imread( path_fname );
if image is None:
            print path_fname + " : failed to read"
continue
image_files.append(fname)
if image.shape[2] == 1:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
print i, path_fname, image.shape
v = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
v = v.flatten()
hist = v / sum(v)
histograms[fname] = hist
pickle.dump( histograms, open( options.opath+"/color_feature.p","wb") )
# feature matrix
feature_matrix = np.zeros( (len(histograms), len(hist)) )
for (i,fi) in enumerate(image_files):
feature_matrix[i,:] = histograms[image_files[i]]
pickle.dump( feature_matrix, open( options.opath+"/color_matrix.p","wb") )
dists = np.zeros((len(image_files), len(image_files)))
knn = {}
# pairwise comparison
for (i, fi) in enumerate(image_files):
for (j, fj) in enumerate(image_files):
if i <= j:
d = cv2.compareHist( histograms[fi], histograms[fj], cv2.cv.CV_COMP_INTERSECT)
dists[i,j] = d
dists[j,i] = d
pickle.dump( dists, open( options.opath+"/color_affinity.p","wb") )
# K nearest neighbors
k=int(options.top)
print 'knn'
for (i, fi) in enumerate(image_files):
vec = sorted( zip(dists[i,:], image_files), reverse = True )
knn[fi] = vec[:k]
print knn[fi]
pickle.dump( knn, open( options.opath+"/color_knn.p","wb") )
# Kmeans clustering
term_crit = (cv2.TERM_CRITERIA_EPS, 100, 0.01)
print feature_matrix
ret, labels, centers = cv2.kmeans(np.float32(feature_matrix), int(options.cluster_count), term_crit, 10, cv2.KMEANS_RANDOM_CENTERS )
label_list=[]
for (i,l) in enumerate(labels):
label_list.append(l[0])
print label_list
image_label = zip( image_files, label_list )
print image_label
pickle.dump( image_label, open( options.opath+"/color_clustering.p","wb") )
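# Editor's note: illustrative sketch (not part of the original module, name
# hypothetical, not used elsewhere). The pairwise scores above use OpenCV's
# histogram intersection; for two normalised histograms the same score can be
# computed directly with numpy as the sum of element-wise minima:
def histogram_intersection(h1, h2):
    return np.minimum(h1, h2).sum()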
###########################
def main():
usage = "usage: %prog [options] image_list_file \n"
usage += " image match"
parser = OptionParser(usage=usage)
parser.add_option("-i", "--input_path", default="",
action="store", dest="ipath",
help="input path")
parser.add_option("-o", "--output_path", default=".",
action="store", dest="opath",
help="output path")
parser.add_option("-f", "--feature", default="color_histogram",
action="store", dest="feature",
help="color_histogram; sift_match;dist_info")
parser.add_option("-m", "--method", default="Intersection",
action="store", dest="method",
help="Intersection;L1;L2")
parser.add_option("-t", "--top", default="5",
action="store", dest="top",
help="Top nearest neighbors")
parser.add_option("-c", "--cluster_count", default="3",
action="store", dest="cluster_count",
help="Number of clusters")
parser.add_option("-d", "--debug", default="0",
action="store", dest="debug_mode",
help="debug intermediate results")
(options, args) = parser.parse_args()
if len(args) < 1 :
print "Need one argument: image_list_file \n"
sys.exit(1)
image_files = [line.strip() for line in open(args[0])]
if options.feature == "color_histogram":
image_match_histogram( image_files, options )
if __name__=="__main__":
main()
| apache-2.0 |
sostenibilidad-unam/abm2 | abmpy/LimitMatrix.py | 1 | 1296 | import pandas as pd
class LimitMatrix:
def __init__(self, csv_path):
df = pd.read_csv(csv_path, encoding="utf-8")
firstCriteriaRow = [i for i, x in enumerate(df.ix[:,0]) if "nan" not in str(x)][1] #the index of the second non null cell in first column
self.alternative_names = df.ix[1:firstCriteriaRow-1,1]
self.criteria_names = df.ix[firstCriteriaRow:,1]
criteria_sum = sum(pd.to_numeric(df.ix[firstCriteriaRow:,2]))
alternatives_sum = sum(pd.to_numeric(df.ix[2:firstCriteriaRow-1,2]))
self.weighted_criteria = pd.to_numeric(df.ix[firstCriteriaRow:,2]).apply(lambda x:x/criteria_sum)
self.weighted_alternatives = []
for i in range(1,firstCriteriaRow):
self.weighted_alternatives.append( pd.to_numeric(df.ix[i,2]) / alternatives_sum )
self.alternatives = {}
for i in range(1,len(self.alternative_names)+1):
self.alternatives[self.alternative_names.get_value(i,1)] = self.weighted_alternatives[i-1]
self.criteria = {}
for i in range(len(self.alternative_names)+1,len(self.alternative_names)+len(self.criteria_names)+1):
self.criteria[self.criteria_names.get_value(i,1)] = self.weighted_criteria.get_value(i,1)
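# Editor's note (illustrative usage sketch, not part of the original module):
# assuming a CSV laid out as __init__ expects (alternative rows first, then
# criteria rows, with raw weights in the third column), usage would look like:
#
#     lm = LimitMatrix("limit_matrix.csv")   # hypothetical file name
#     print(lm.alternatives)  # {alternative name: normalised weight}
#     print(lm.criteria)      # {criterion name: normalised weight}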
| gpl-3.0 |
OSUrobotics/privacy-interfaces | filtering/probability_filters/scripts/localization_filter/weighted_particle_viz.py | 1 | 4801 | #!/usr/bin/env python
import rospy
from amcl.msg import ParticleCloudWeights
from geometry_msgs.msg import PoseArray, Pose
from visualization_msgs.msg import Marker, MarkerArray
import message_filters
from matplotlib import pyplot
from tf.transformations import *
class WeightedParticleVisualizer():
def __init__(self):
self.pub_markers = rospy.Publisher('/weighted_poses', MarkerArray)
rospy.loginfo('Subscribing to cloud of weighted AMCL poses...')
cloud_sub = message_filters.Subscriber('/particlecloud', PoseArray)
weights_sub = message_filters.Subscriber('/particlecloud_weights', ParticleCloudWeights)
ts = message_filters.TimeSynchronizer([cloud_sub, weights_sub], 10)
ts.registerCallback(self.particle_filter_callback)
def particle_filter_callback(self, poses, weights):
# Construct mean pose
how_many = len(poses.poses)
mean_pose = Pose()
RZ = 0
for pose in poses.poses:
mean_pose.position.x += pose.position.x / how_many
mean_pose.position.y += pose.position.y / how_many
mean_pose.position.z += pose.position.z / how_many
q = [pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w]
rx, ry, rz = euler_from_quaternion(q)
RZ += rz / how_many
[mean_pose.orientation.x,
mean_pose.orientation.y,
mean_pose.orientation.z,
mean_pose.orientation.w] = quaternion_about_axis(RZ, (0,0,1))
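        # Editor's note (added comment): averaging yaw angles arithmetically as
        # above can wrap incorrectly when particles straddle +/-pi; a circular
        # mean such as math.atan2(mean(sin(rz)), mean(cos(rz))) over the
        # particle yaws would be more robust in that case.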
# Use Decorate-Sort-Undecorate idiom
weights_sum = sum([weight.data for weight in weights.weights])
#print weights_sum
weights_enumerated = [[weight.data / weights_sum, i] for i, weight in enumerate(weights.weights)]
weights_enumerated.sort(reverse=True)
#pyplot.plot([weight[0] for weight in weights_enumerated], '.')
#pyplot.show()
self.confidence = 1.0
if self.confidence == -1: # just the mean pose
poses.poses = [mean_pose]
else:
for i in range(len(weights_enumerated)):
if i > 0:
weights_enumerated[i][0] += weights_enumerated[i-1][0] # add previous weight
if weights_enumerated[i][0] > self.confidence:
rospy.loginfo('Selected {0} poses.'.format(i+1))
break
indices = [el[1] for el in weights_enumerated]
poses.poses = [poses.poses[index] for index in indices[:i+1]]
poses.poses.append(mean_pose) # append mean pose!
weights.weights = [weights.weights[index] for index in indices[:i+1]]
markers = MarkerArray() # clear markers
for i in range(500): # HACK assumes 500 poses
marker = Marker()
marker.header = poses.header
marker.id = i
marker.type = marker.ARROW
marker.action = 0 # add/modify marker
marker.pose = poses.poses[i]
marker.scale.x = 0.15 # constant length
marker.scale.y = 0.02
marker.scale.z = 0.02
marker.color.a = 1.0
#if weights_enumerated[i][0] < 0.20:
if i < 0:
marker.color.r = 0.2
marker.color.g = 0.2
marker.color.b = 0.2
#elif weights_enumerated[i][0] < 0.50:
elif i < 5:
marker.pose.position.z +=0.03
marker.color.r = 0
marker.color.g = 1
marker.color.b = 0
#elif weights_enumerated[i][0] < 1.00:
elif i < 505:
marker.pose.position.z +=0.03
marker.color.r = 0
marker.color.g = 1
marker.color.b = 0
if len(markers.markers) >= 501:
markers.markers[i] = marker
else:
markers.markers.append(marker)
# Add mean pose
marker = Marker()
marker.header = poses.header
marker.id = i
marker.type = marker.ARROW
marker.action = 0 # add/modify marker
marker.pose = mean_pose
marker.pose.position.z += 0.07
marker.scale.x = 0.15 # constant length
marker.scale.y = 0.02
marker.scale.z = 0.02
marker.color.a = 1.0
marker.color.r = 1
marker.color.g = 1
marker.color.b = 1
if len(markers.markers) >= 501:
markers.markers[-1] = marker
else:
markers.markers.append(marker)
self.pub_markers.publish(markers)
if __name__ == "__main__":
rospy.init_node('weighted_particle_visualizer')
viz = WeightedParticleVisualizer()
rospy.spin()
| mit |
schackv/shapewarp | shapewarp_demo/demo.py | 1 | 2588 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 20 10:33:37 2014
@author: schackv
"""
import matplotlib.pyplot as plt
import shapewarp.ASM as ASM
import shapewarp.warp as warp
import shapewarp.plotting as shplt
import example_data
import scipy.misc
from numpy import isnan
def demo():
"""Demonstrate the shape model and warping functionality"""
# Read shape data
data = example_data.examples()
landmarks = data.landmarks()
print(landmarks.shape)
# Plot shapes
[shplt.plot_shape(L,'k') for L in landmarks]
plt.show()
# Active shape model (with generalized Procrustes inside)
asm = ASM.ASM()
asm.build(landmarks)
######## Plotting of shape model ######
f, ax = plt.subplots(2,2)
# Plot mean shape on top of aligned input shapes
plt.sca(ax[0,0])
[shplt.plot_shape(L,'k') for L in asm.AlignedShapes.T]
shplt.plot_shape(asm.MeanShape,'r',linewidth=2.0)
plt.title('Input shapes and mean shape')
# Show covariance matrix as image
plt.sca(ax[0,1])
plt.imshow(asm.Covariance)
plt.title('Covariance matrix')
# Plot the first two eigenmodes
plt.sca(ax[1,0])
shplt.plot_mode(asm.MeanShape,asm.PCModes[0],3)
plt.title('Mean shape +/- 3 std of first mode')
plt.sca(ax[1,1])
shplt.plot_mode(asm.MeanShape,asm.PCModes[1],3)
plt.title('Mean shape +/- 3 std of second mode')
for a in ax.flatten():
a.axis('image')
a.invert_yaxis()
plt.show()
######################################
######## Warping demo ################
image = data.images[ [i.has_image() for i in data.images].index(True) ]
im = image.load()
landmarks = image.landmarks()
    W = warp.Warper(asm.MeanShape,scale=0.5) # Warp to 0.5 of the full scale of the mean shape
warped_image = W.warp_image(im,landmarks)
f, ax = plt.subplots(1,3)
titles = ['Original image','Warped image','Mask']
for idx,im in enumerate( (im,warped_image,W.mask)):
plt.sca(ax[idx])
plt.imshow(im)
if idx==0:
shplt.plot_shape(landmarks,'r')
plt.axis('image')
plt.title(titles[idx])
plt.show()
# Save image
mask = W.mask
warped_image[isnan(warped_image)]=0
mask[isnan(mask)] = 0
scipy.misc.toimage(warped_image,cmin=0,cmax=1).save('warpedImage.png')
scipy.misc.toimage(mask,cmin=0,cmax=1).save('mask.png')
######################################
# when executed, just run asm_demo():
if __name__ == '__main__':
demo() | mit |
yyjiang/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 40 | 16837 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| bsd-3-clause |
UNR-AERIAL/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
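    # Worked example (added for clarity): with n_features=11,
    # n_features_to_select=3 and step=2 both formulas give
    # 1 + (11 + 2 - 3 - 1) // 2 = 5 and 1 + ceil((11 - 3) / 2) = 5;
    # with step=3 they give 1 + 10 // 3 = 4 and 1 + ceil(8 / 3) = 4.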
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
JPFrancoia/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
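# Editor's note (added comment): the helper above draws n_nonzeros (row, column)
# positions uniformly at random and fills them with Gaussian values; with the
# script defaults (n_features=10**4, ratio_nonzeros=10**-3) that is
# int(1e-3 * 1e4) = 10 sampled positions, and any duplicate positions are
# summed when the COO matrix is converted to dense/CSR.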
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
alivecor/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 37 | 3651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Convert the labels to a one-hot tensor of shape (length of features, 3)
  # with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
jangorecki/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_anomaly_deeplearning_large.py | 8 | 1702 | from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
def anomaly():
print("Deep Learning Anomaly Detection MNIST")
train = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
test = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/test.csv.gz"))
predictors = list(range(0,784))
resp = 784
# unsupervised -> drop the response column (digit: 0-9)
train = train[predictors]
test = test[predictors]
# 1) LEARN WHAT'S NORMAL
# train unsupervised Deep Learning autoencoder model on train_hex
ae_model = H2OAutoEncoderEstimator(activation="Tanh", hidden=[2], l1=1e-5, ignore_const_cols=False, epochs=1)
ae_model.train(x=predictors,training_frame=train)
# 2) DETECT OUTLIERS
# anomaly app computes the per-row reconstruction error for the test data set
# (passing it through the autoencoder model and computing mean square error (MSE) for each row)
test_rec_error = ae_model.anomaly(test)
# 3) VISUALIZE OUTLIERS
# Let's look at the test set points with low/median/high reconstruction errors.
# We will now visualize the original test set points and their reconstructions obtained
# by propagating them through the narrow neural net.
# Convert the test data into its autoencoded representation (pass through narrow neural net)
test_recon = ae_model.predict(test)
# In python, the visualization could be done with tools like numpy/matplotlib or numpy/PIL
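# Minimal sketch (illustrative only) of the visualization step mentioned above.
# It assumes H2OFrame row slicing and as_data_frame() are available to pull the
# pixel values into numpy, and uses matplotlib for display.
def plot_reconstruction(original_frame, reconstructed_frame, row=0):
    import numpy as np
    import matplotlib.pyplot as plt
    # Pull one 784-pixel row from each frame and reshape to 28x28 images.
    orig = np.array(original_frame[row, :].as_data_frame()).reshape(28, 28)
    recon = np.array(reconstructed_frame[row, :].as_data_frame()).reshape(28, 28)
    _, (ax0, ax1) = plt.subplots(1, 2)
    ax0.imshow(orig, cmap="gray")
    ax0.set_title("original")
    ax1.imshow(recon, cmap="gray")
    ax1.set_title("reconstruction")
    plt.show()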
if __name__ == "__main__":
pyunit_utils.standalone_test(anomaly)
else:
anomaly()
| apache-2.0 |
xuanyuanking/spark | python/pyspark/pandas/tests/test_typedef.py | 15 | 16852 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
import datetime
import decimal
from typing import List
import pandas
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
FloatType,
IntegerType,
LongType,
StringType,
StructField,
StructType,
ByteType,
ShortType,
DateType,
DecimalType,
DoubleType,
TimestampType,
)
from pyspark.pandas.typedef import (
as_spark_type,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
infer_return_type,
pandas_on_spark_type,
)
from pyspark import pandas as ps
class TypeHintTests(unittest.TestCase):
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_from_pandas_instances(self):
def func() -> pd.Series[int]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.Series[np.float]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.float64)
self.assertEqual(inferred.spark_type, DoubleType())
def func() -> "pd.DataFrame[np.float, str]":
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pandas.DataFrame[np.float]":
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.Series[int]":
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[np.float, str]:
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> pd.DataFrame[np.float]:
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.Series[pdf.b.dtype]: # type: ignore
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, CategoricalDtype(categories=["a", "b", "c"]))
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
def test_if_pandas_implements_class_getitem(self):
# the current type hint implementation of pandas DataFrame assumes pandas doesn't
# implement '__class_getitem__'. This test case is to make sure pandas
# doesn't implement them.
assert not ps._frame_has_class_getitem
assert not ps._series_has_class_getitem
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances(self):
def func() -> 'pd.DataFrame["a" : np.float, "b":str]': # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.DataFrame['a': np.float, 'b': int]": # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType(
[StructField("(x, a)", LongType()), StructField("(y, b)", LongType())]
)
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances_negative(self):
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> pd.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> pd.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_infer_schema_with_names_negative(self):
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> ps.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> ps.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> ps.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_as_spark_type_pandas_on_spark_dtype(self):
type_mapper = {
# binary
np.character: (np.character, BinaryType()),
np.bytes_: (np.bytes_, BinaryType()),
np.string_: (np.bytes_, BinaryType()),
bytes: (np.bytes_, BinaryType()),
# integer
np.int8: (np.int8, ByteType()),
np.byte: (np.int8, ByteType()),
np.int16: (np.int16, ShortType()),
np.int32: (np.int32, IntegerType()),
np.int64: (np.int64, LongType()),
np.int: (np.int64, LongType()),
int: (np.int64, LongType()),
# floating
np.float32: (np.float32, FloatType()),
np.float: (np.float64, DoubleType()),
np.float64: (np.float64, DoubleType()),
float: (np.float64, DoubleType()),
# string
np.str: (np.unicode_, StringType()),
np.unicode_: (np.unicode_, StringType()),
str: (np.unicode_, StringType()),
# bool
np.bool: (np.bool, BooleanType()),
bool: (np.bool, BooleanType()),
# datetime
np.datetime64: (np.datetime64, TimestampType()),
datetime.datetime: (np.dtype("datetime64[ns]"), TimestampType()),
# DateType
datetime.date: (np.dtype("object"), DateType()),
# DecimalType
decimal.Decimal: (np.dtype("object"), DecimalType(38, 18)),
# ArrayType
np.ndarray: (np.dtype("object"), ArrayType(StringType())),
List[bytes]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.character]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.bytes_]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.string_]: (np.dtype("object"), ArrayType(BinaryType())),
List[bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[np.bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[datetime.date]: (np.dtype("object"), ArrayType(DateType())),
List[np.int8]: (np.dtype("object"), ArrayType(ByteType())),
List[np.byte]: (np.dtype("object"), ArrayType(ByteType())),
List[decimal.Decimal]: (np.dtype("object"), ArrayType(DecimalType(38, 18))),
List[float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float64]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float32]: (np.dtype("object"), ArrayType(FloatType())),
List[np.int32]: (np.dtype("object"), ArrayType(IntegerType())),
List[int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int64]: (np.dtype("object"), ArrayType(LongType())),
List[np.int16]: (np.dtype("object"), ArrayType(ShortType())),
List[str]: (np.dtype("object"), ArrayType(StringType())),
List[np.unicode_]: (np.dtype("object"), ArrayType(StringType())),
List[datetime.datetime]: (np.dtype("object"), ArrayType(TimestampType())),
List[np.datetime64]: (np.dtype("object"), ArrayType(TimestampType())),
# CategoricalDtype
CategoricalDtype(categories=["a", "b", "c"]): (
CategoricalDtype(categories=["a", "b", "c"]),
LongType(),
),
}
for numpy_or_python_type, (dtype, spark_type) in type_mapper.items():
self.assertEqual(as_spark_type(numpy_or_python_type), spark_type)
self.assertEqual(pandas_on_spark_type(numpy_or_python_type), (dtype, spark_type))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
as_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
as_spark_type(np.dtype("object"))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
pandas_on_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
pandas_on_spark_type(np.dtype("object"))
@unittest.skipIf(not extension_dtypes_available, "The pandas extension types are not available")
def test_as_spark_type_extension_dtypes(self):
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
type_mapper = {
Int8Dtype(): ByteType(),
Int16Dtype(): ShortType(),
Int32Dtype(): IntegerType(),
Int64Dtype(): LongType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_object_dtypes_available, "The pandas extension object types are not available"
)
def test_as_spark_type_extension_object_dtypes(self):
from pandas import BooleanDtype, StringDtype
type_mapper = {
BooleanDtype(): BooleanType(),
StringDtype(): StringType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_float_dtypes_available, "The pandas extension float types are not available"
)
def test_as_spark_type_extension_float_dtypes(self):
from pandas import Float32Dtype, Float64Dtype
type_mapper = {
Float32Dtype(): FloatType(),
Float64Dtype(): DoubleType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
if __name__ == "__main__":
from pyspark.pandas.tests.test_typedef import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/colors.py | 1 | 53190 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of colors called
a colormap. Colormapping typically involves two steps: a data array is first
mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
subclass; then this number in the 0-1 range is mapped to a color using an
instance of a subclass of :class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all the built-in
colormap instances, but is also useful for making custom colormaps, and
:class:`ListedColormap`, which is used for generating a custom colormap from a
list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single color
specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic built-in colors, you can use a single letter
- b: blue
- g: green
- r: red
- c: cyan
- m: magenta
- y: yellow
- k: black
- w: white
Gray shades can be given as a string encoding a float in the 0-1 range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify the
color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in
the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'
are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
import collections
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = list(map(int, parts[:2]))
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR >= 1 and NP_MINOR >= 2
cnames = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# add british equivs
for k, v in list(cnames.items()):
if k.find('gray') >= 0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([np.round(val * 255) for val in rgb[:3]])
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, str):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter(object):
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b': (0.0, 0.0, 1.0),
'g': (0.0, 0.5, 0.0),
'r': (1.0, 0.0, 0.0),
'c': (0.0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0.0, 0.0, 0.0),
'w': (1.0, 1.0, 1.0), }
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try:
return self.cache[arg]
except KeyError:
pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try:
return self.cache[arg]
except KeyError:
pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
argl = arg.lower()
color = self.colors.get(argl, None)
if color is None:
str1 = cnames.get(argl, argl)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(argl)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl] * 3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4' % len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
raise ValueError(
                        'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError) as exc:
raise ValueError(
'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
In addition, if *arg* is "none" (case-insensitive),
then (0,0,0,0) will be returned.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if arg.lower() == 'none':
return (0.0, 0.0, 0.0, 0.0)
except AttributeError:
pass
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
raise ValueError(
                            'number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], alpha
r, g, b = arg[:3]
if [x for x in (r, g, b) if (float(x) < 0) or (x > 1)]:
raise ValueError(
                        'number in rgb sequence outside 0-1 range')
else:
r, g, b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r, g, b, alpha
except (TypeError, ValueError) as exc:
raise ValueError(
'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
nc = len(c)
except TypeError:
raise ValueError(
"Cannot convert argument type %s to rgba array" % type(c))
try:
if nc == 0 or c.lower() == 'none':
return np.zeros((0, 4), dtype=np.float)
except AttributeError:
pass
try:
# Single value? Put it in an array with a single row.
return np.array([self.to_rgba(c, alpha)], dtype=np.float)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
if (c.ravel() > 1).any() or (c.ravel() < 0).any():
raise ValueError(
"number in rgba sequence is outside 0-1 range")
result = np.asarray(c, np.float)
if alpha is not None:
if alpha > 1 or alpha < 0:
raise ValueError("alpha must be in 0-1 range")
result[:, 3] = alpha
return result
# This alpha operation above is new, and depends
# on higher levels to refrain from setting alpha
# to values other than None unless there is
# intent to override any existing alpha values.
# It must be some other sequence of color specs.
result = np.zeros((nc, 4), dtype=np.float)
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha)
return result
colorConverter = ColorConverter()
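# Minimal usage sketch (illustrative only) for the shared ``colorConverter``
# instance: single letters, hex strings, gray-level strings and RGB(A)
# sequences are all accepted, as described in ``to_rgb``/``to_rgba`` above.
def _demo_color_converter():
    assert colorConverter.to_rgb('r') == (1.0, 0.0, 0.0)
    assert colorConverter.to_rgb('#00ff00') == (0.0, 1.0, 0.0)
    assert colorConverter.to_rgb('0.5') == (0.5, 0.5, 0.5)
    # to_rgba adds (or, for RGBA input, overrides) the alpha channel.
    assert colorConverter.to_rgba('r', alpha=0.25) == (1.0, 0.0, 0.0, 0.25)
    # to_rgba_array vectorizes the conversion into an (N, 4) float array.
    return colorConverter.to_rgba_array(['r', 'g', 'b'], alpha=0.5)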
def makeMappingArray(N, data, gamma=1.0):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
Alternatively, data can be a function mapping values between 0 - 1
to 0 - 1.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
if isinstance(data, collections.Callable):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)
return lut
try:
adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertible to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x) - x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N - 1)
lut = np.zeros((N,), np.float)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = (((xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])) *
(y0[ind] - y1[ind - 1]) + y1[ind - 1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
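# Minimal sketch (illustrative only) of the (x, y0, y1) mapping format that
# makeMappingArray consumes; y0 and y1 differ only at discontinuities.
def _demo_make_mapping_array():
    data = [(0.0, 0.0, 0.0),
            (0.5, 0.25, 0.75),   # jump from 0.25 to 0.75 at x = 0.5
            (1.0, 1.0, 1.0)]
    lut = makeMappingArray(5, data)
    # lut[0] == 0.0 and lut[-1] == 1.0; interior entries are linearly
    # interpolated between y1[i] and y0[i+1], as described above.
    return lut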
class Colormap(object):
"""
Baseclass for all scalar to RGBA mappings.
Typically Colormap instances are used to convert data values (floats) from
the interval ``[0, 1]`` to the RGBA color that the respective Colormap
represents. For scaling of data into the ``[0, 1]`` interval see
:class:`matplotlib.colors.Normalize`. It is worth noting that
:class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
``data->normalize->map-to-color`` processing chain.
"""
def __init__(self, name, N=256):
r"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of rgb quantization levels.
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N + 1
self._i_bad = N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: :class:`matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : scalar, ndarray
The data value(s) to convert to RGBA.
For floats, X should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, X should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float, None
Alpha must be a scalar between 0 and 1, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
        Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
# See class docstring for arg/kwarg documentation.
if not self._isinit:
self._init()
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.array(X, copy=True) # Copy here to avoid side effects.
mask_bad = xma.mask # Mask will be used below.
xa = xma.filled() # Fill to avoid infs, etc.
del xma
# Calculations with native byteorder are faster, and avoid a
# bug that otherwise can occur with putmask when the last
# argument is a numpy scalar.
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder()
if xa.dtype.kind == "f":
# Treat 1.0 as slightly less than 1.
vals = np.array([1, 0], dtype=xa.dtype)
almost_one = np.nextafter(*vals)
cbook._putmask(xa, xa == 1.0, almost_one)
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
xa *= self.N
if NP_CLIP_OUT:
np.clip(xa, -1, self.N, out=xa)
else:
xa = np.clip(xa, -1, self.N)
# ensure that all 'under' values will still have negative
# value after casting to int
cbook._putmask(xa, xa < 0.0, -1)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
cbook._putmask(xa, xa > self.N - 1, self._i_over)
cbook._putmask(xa, xa < 0, self._i_under)
if mask_bad is not None:
if mask_bad.shape == xa.shape:
cbook._putmask(xa, mask_bad, self._i_bad)
elif mask_bad:
xa.fill(self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut.copy() # Don't let alpha modify original _lut.
if alpha is not None:
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
if bytes:
alpha = int(alpha * 255)
if (lut[-1] == 0).all():
lut[:-1, -1] = alpha
# All zeros is taken as a flag for the default bad
# color, which is no color--fully transparent. We
# don't want to override this.
else:
lut[:, -1] = alpha
# If the bad value is set to have a color, then we
# override its alpha just as for any other value.
rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0, :])
return rgba
def set_bad(self, color='k', alpha=None):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_under(self, color='k', alpha=None):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_over(self, color='k', alpha=None):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit:
self._init()
return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and
np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256, gamma=1.0):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table. Entries for alpha are optional.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:meth:`LinearSegmentedColormap.from_list`
Static method; factory function for generating a
smoothly-varying LinearSegmentedColormap.
:func:`makeMappingArray`
For information about making a mapping array.
"""
# True only if all colors in map are identical; needed for contouring.
self.monochrome = False
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
self._gamma = gamma
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(
self.N, self._segmentdata['red'], self._gamma)
self._lut[:-3, 1] = makeMappingArray(
self.N, self._segmentdata['green'], self._gamma)
self._lut[:-3, 2] = makeMappingArray(
self.N, self._segmentdata['blue'], self._gamma)
if 'alpha' in self._segmentdata:
self._lut[:-3, 3] = makeMappingArray(
self.N, self._segmentdata['alpha'], 1)
self._isinit = True
self._set_extremes()
def set_gamma(self, gamma):
"""
Set a new gamma value and regenerate color map.
"""
self._gamma = gamma
self._init()
@staticmethod
def from_list(name, colors, N=256, gamma=1.0):
"""
Make a linear segmented colormap with *name* from a sequence
of *colors* which evenly transitions from colors[0] at val=0
to colors[-1] at val=1. *N* is the number of rgb quantization
levels.
Alternatively, a list of (value, color) tuples can be given
to divide the range unevenly.
"""
if not cbook.iterable(colors):
raise ValueError('colors must be iterable')
if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \
not cbook.is_string_like(colors[0]):
# List of value, color pairs
vals, colors = list(zip(*colors))
else:
vals = np.linspace(0., 1., len(colors))
cdict = dict(red=[], green=[], blue=[], alpha=[])
for val, color in zip(vals, colors):
r, g, b, a = colorConverter.to_rgba(color)
cdict['red'].append((val, r, r))
cdict['green'].append((val, g, g))
cdict['blue'].append((val, b, b))
cdict['alpha'].append((val, a, a))
return LinearSegmentedColormap(name, cdict, N, gamma)
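# Minimal sketch (illustrative only): the cdict example from the class
# docstring turned into a colormap, next to the simpler from_list shortcut.
def _demo_linear_segmented_colormap():
    cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0),
                       (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap = LinearSegmentedColormap('ramp', cdict, N=256)
    simple = LinearSegmentedColormap.from_list('bwr_like', ['b', 'w', 'r'])
    # Calling a colormap with floats in [0, 1] yields RGBA values.
    return cmap(0.5), simple(np.linspace(0.0, 1.0, 5))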
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name='from_list', N=None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 or Nx4 floating point array
(*N* rgb or rgba values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are
# identical; needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try:
gray = float(self.colors)
except TypeError:
pass
else:
self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgba = colorConverter.to_rgba_array(self.colors)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3] = rgba
self._isinit = True
self._set_extremes()
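# Minimal sketch (illustrative only): a ListedColormap used as a small
# discrete palette; integer indices pick entries directly.
def _demo_listed_colormap():
    cmap = ListedColormap(['r', 'g', 'b'], name='rgb3')
    return cmap(0), cmap(2), cmap(0.99)   # red, blue, blue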
class Normalize(object):
"""
A class which, when called, can normalize data into
the ``[0.0, 1.0]`` interval.
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
@staticmethod
def process_value(value):
"""
Homogenize the input *value* for easy and efficient normalization.
*value* can be a scalar or sequence.
Returns *result*, *is_scalar*, where *result* is a
masked array matching *value*. Float dtypes are preserved;
integer types with two bytes or smaller are converted to
np.float32, and larger types are converted to np.float.
Preserving float32 when possible, and using in-place operations,
can greatly improve speed for large arrays.
Experimental; we may want to add an option to force the
use of float32.
"""
if cbook.iterable(value):
is_scalar = False
result = ma.asarray(value)
if result.dtype.kind == 'f':
if isinstance(value, np.ndarray):
result = result.copy()
elif result.dtype.itemsize > 2:
result = result.astype(np.float)
else:
result = result.astype(np.float32)
else:
is_scalar = True
result = ma.array([value]).astype(np.float)
return result, is_scalar
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
resdat -= vmin
resdat /= (vmax - vmin)
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin = float(self.vmin)
vmax = float(self.vmax)
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
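# Minimal sketch (illustrative only): Normalize rescales data linearly onto
# [0, 1] from vmin..vmax, and inverse() undoes the mapping.
def _demo_normalize():
    norm = Normalize(vmin=0.0, vmax=10.0)
    scaled = norm([0.0, 2.5, 10.0])    # -> masked array [0.0, 0.25, 1.0]
    return scaled, norm.inverse(scaled)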
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
cbook._putmask(resdat, mask, 1)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax / vmin), val)
else:
return vmin * pow((vmax / vmin), value)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
A = ma.masked_less_equal(A, 0, copy=False)
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is not None and self.vmax is not None:
return
A = ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
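# Minimal sketch (illustrative only): LogNorm places decades at equal
# distances in [0, 1]; non-positive input values come back masked.
def _demo_log_norm():
    norm = LogNorm(vmin=1.0, vmax=1000.0)
    return norm([1.0, 10.0, 100.0, 1000.0])   # -> [0.0, 1/3, 2/3, 1.0]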
class SymLogNorm(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
def __init__(self, linthresh, linscale=1.0,
vmin=None, vmax=None, clip=False):
"""
*linthresh*:
The range within which the plot is linear (to
avoid having the plot go to infinity around zero).
*linscale*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range. Defaults to 1.
"""
Normalize.__init__(self, vmin, vmax, clip)
self.linthresh = linthresh
self._linscale_adj = (linscale / (1.0 - np.e ** -1))
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = self._transform(result.data)
resdat -= self._lower
resdat /= (self._upper - self._lower)
if is_scalar:
result = result[0]
return result
def _transform(self, a):
"""
Inplace transformation.
"""
masked = np.abs(a) > self.linthresh
sign = np.sign(a[masked])
log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
log *= sign * self.linthresh
a[masked] = log
a[~masked] *= self._linscale_adj
return a
def _inv_transform(self, a):
"""
Inverse inplace Transformation.
"""
masked = np.abs(a) > (self.linthresh * self._linscale_adj)
sign = np.sign(a[masked])
exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
exp *= sign * self.linthresh
a[masked] = exp
a[~masked] /= self._linscale_adj
return a
def _transform_vmin_vmax(self):
"""
Calculates vmin and vmax in the transformed system.
"""
vmin, vmax = self.vmin, self.vmax
arr = np.array([vmax, vmin])
self._upper, self._lower = self._transform(arr)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
val = ma.asarray(value)
val = val * (self._upper - self._lower) + self._lower
return self._inv_transform(val)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
self._transform_vmin_vmax()
def autoscale_None(self, A):
""" autoscale only None-valued vmin or vmax """
if self.vmin is not None and self.vmax is not None:
pass
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
self._transform_vmin_vmax()
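# Minimal sketch (illustrative only): SymLogNorm stays linear inside
# (-linthresh, linthresh) and goes logarithmic outside, so data that
# crosses zero can still be normalized.
def _demo_symlog_norm():
    norm = SymLogNorm(linthresh=1.0, vmin=-100.0, vmax=100.0)
    return norm([-100.0, -1.0, 0.0, 1.0, 100.0])   # 0.0 maps to 0.5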
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N - 1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax + 1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx >= b] = i
if self._interp:
scalefac = float(self.Ncmap - 1) / (self.N - 2)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
return ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = cbook.deprecated('1.3', alternative='Normalize',
name='normalize',
obj_type='class alias')(Normalize)
no_norm = cbook.deprecated('1.3', alternative='NoNorm',
name='no_norm',
obj_type='class alias')(NoNorm)
def rgb_to_hsv(arr):
"""
convert rgb values in a numpy array to hsv values
input and output arrays should have shape (M,N,3)
"""
out = np.zeros(arr.shape, dtype=np.float)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[:, :, 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[:, :, 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[:, :, 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[:, :, 0] = (out[:, :, 0] / 6.0) % 1.0
out[:, :, 1] = s
out[:, :, 2] = arr_max
return out
def hsv_to_rgb(hsv):
"""
convert hsv values in a numpy array to rgb values
both input and output arrays have shape (M,N,3)
"""
h = hsv[:, :, 0]
s = hsv[:, :, 1]
v = hsv[:, :, 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(np.int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.empty_like(hsv)
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
return rgb
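# Minimal sketch (illustrative only): rgb_to_hsv and hsv_to_rgb operate on
# (M, N, 3) float arrays and are inverses of each other up to rounding.
def _demo_rgb_hsv_roundtrip():
    rgb = np.array([[[1.0, 0.0, 0.0],
                     [0.2, 0.4, 0.6]]])   # shape (1, 2, 3)
    hsv = rgb_to_hsv(rgb)
    return np.allclose(hsv_to_rgb(hsv), rgb)   # True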
class LightSource(object):
"""
Create a light source coming from the specified azimuth and elevation.
Angles are in degrees, with the azimuth measured
clockwise from north and elevation up from the zero plane of the surface.
The :meth:`shade` is used to produce rgb values for a shaded relief image
given a data array.
"""
def __init__(self, azdeg=315, altdeg=45,
hsv_min_val=0, hsv_max_val=1, hsv_min_sat=1,
hsv_max_sat=0):
"""
        Specify the azimuth (measured clockwise from north) and altitude
(measured up from the plane of the surface) of the light source
in degrees.
The color of the resulting image will be darkened
by moving the (s,v) values (in hsv colorspace) toward
(hsv_min_sat, hsv_min_val) in the shaded regions, or
lightened by sliding (s,v) toward
        (hsv_max_sat, hsv_max_val) in regions that are illuminated.
        The default extremes are chosen so that completely shaded points
are nearly black (s = 1, v = 0) and completely illuminated points
are nearly white (s = 0, v = 1).
"""
self.azdeg = azdeg
self.altdeg = altdeg
self.hsv_min_val = hsv_min_val
self.hsv_max_val = hsv_max_val
self.hsv_min_sat = hsv_min_sat
self.hsv_max_sat = hsv_max_sat
def shade(self, data, cmap):
"""
Take the input data array, convert to HSV values in the
given colormap, then adjust those color values
        to give the impression of a shaded relief map with a
specified light source.
RGBA values are returned, which can then be used to
plot the shaded image with imshow.
"""
rgb0 = cmap((data - data.min()) / (data.max() - data.min()))
rgb1 = self.shade_rgb(rgb0, elevation=data)
rgb0[:, :, 0:3] = rgb1
return rgb0
def shade_rgb(self, rgb, elevation, fraction=1.):
"""
        Take the input RGB array (ny*nx*3) and adjust its color values
        to give the impression of a shaded relief map with a
        specified light source using the elevation (ny*nx).
A new RGB array ((ny*nx*3)) is returned.
"""
# imagine an artificial sun placed at infinity in some azimuth and
# elevation position illuminating our surface. The parts of the
# surface that slope toward the sun should brighten while those sides
# facing away should become darker. convert alt, az to radians
az = self.azdeg * np.pi / 180.0
alt = self.altdeg * np.pi / 180.0
# gradient in x and y directions
dx, dy = np.gradient(elevation)
slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
aspect = np.arctan2(dx, dy)
intensity = (np.sin(alt) * np.sin(slope) + np.cos(alt) *
np.cos(slope) * np.cos(-az - aspect - 0.5 * np.pi))
# rescale to interval -1,1
# +1 means maximum sun exposure and -1 means complete shade.
intensity = (intensity - intensity.min()) / \
(intensity.max() - intensity.min())
intensity = (2. * intensity - 1.) * fraction
# convert to rgb, then rgb to hsv
#rgb = cmap((data-data.min())/(data.max()-data.min()))
hsv = rgb_to_hsv(rgb[:, :, 0:3])
# modify hsv values to simulate illumination.
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity > 0),
((1. - intensity) * hsv[:, :, 1] +
intensity * self.hsv_max_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity > 0,
((1. - intensity) * hsv[:, :, 2] +
intensity * self.hsv_max_val),
hsv[:, :, 2])
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity < 0),
((1. + intensity) * hsv[:, :, 1] -
intensity * self.hsv_min_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity < 0,
((1. + intensity) * hsv[:, :, 2] -
intensity * self.hsv_min_val),
hsv[:, :, 2])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
# convert modified hsv back to rgb.
return hsv_to_rgb(hsv)
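# Editor's note: a minimal, hedged usage sketch of the LightSource class above.
# The toy grid, the colormap choice (cm.jet) and the helper name are
# illustrative assumptions, not part of this module's API.
def _example_shaded_relief():
    import numpy as np
    from matplotlib import cm
    # Build a small synthetic elevation surface and shade it with the
    # default light source (azimuth 315, altitude 45 degrees).
    y, x = np.mgrid[-2:2:64j, -2:2:64j]
    elevation = np.sin(3 * x) * np.cos(2 * y)
    ls = LightSource(azdeg=315, altdeg=45)
    # shade() maps the data through the colormap, then darkens/lightens the
    # colors according to the computed illumination; the RGBA result can be
    # passed directly to plt.imshow().
    return ls.shade(elevation, cm.jet)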
def from_levels_and_colors(levels, colors, extend='neither'):
"""
A helper routine to generate a cmap and a norm instance which
behave similarly to contourf's levels and colors arguments.
Parameters
----------
levels : sequence of numbers
The quantization levels used to construct the :class:`BoundaryNorm`.
Values ``v`` are quantized to level ``i`` if
``levels[i] <= v < levels[i+1]``.
colors : sequence of colors
The fill color to use for each level. If `extend` is "neither" there
must be ``len(levels) - 1`` colors. For an `extend` of "min" or "max" add
one extra color, and for an `extend` of "both" add two colors.
extend : {'neither', 'min', 'max', 'both'}, optional
The behaviour when a value falls out of range of the given levels.
See :func:`~matplotlib.pyplot.contourf` for details.
Returns
-------
(cmap, norm) : tuple containing a :class:`Colormap` and a \
:class:`Normalize` instance
"""
colors_i0 = 0
colors_i1 = None
if extend == 'both':
colors_i0 = 1
colors_i1 = -1
extra_colors = 2
elif extend == 'min':
colors_i0 = 1
extra_colors = 1
elif extend == 'max':
colors_i1 = -1
extra_colors = 1
elif extend == 'neither':
extra_colors = 0
else:
raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
n_data_colors = len(levels) - 1
n_expected_colors = n_data_colors + extra_colors
if len(colors) != n_expected_colors:
raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
' n_colors == {2!r}. Got {3!r}.'
''.format(extend, len(levels), n_expected_colors,
len(colors)))
cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
if extend in ['min', 'both']:
cmap.set_under(colors[0])
else:
cmap.set_under('none')
if extend in ['max', 'both']:
cmap.set_over(colors[-1])
else:
cmap.set_over('none')
cmap.colorbar_extend = extend
norm = BoundaryNorm(levels, ncolors=n_data_colors)
return cmap, norm
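# Editor's note: a hedged example of from_levels_and_colors(); the levels and
# color names below are arbitrary illustrative choices.
def _example_levels_and_colors():
    # Three bins -> three fill colors with extend='neither'; values outside
    # the levels are handled by the 'none' under/over colors set above.
    levels = [0, 1, 2, 3]
    colors = ['red', 'green', 'blue']
    cmap, norm = from_levels_and_colors(levels, colors, extend='neither')
    # The (cmap, norm) pair can be passed straight to imshow/pcolormesh,
    # mirroring what contourf does with its levels= and colors= arguments.
    return cmap, norm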
| gpl-3.0 |
ArtOfCode-/quality-machine | sockets.py | 1 | 1806 | from sklearn import svm
from typing import Any
import json
class SocketMessage:
def __init__(self, action: str, data: Any):
self.action = action
self.data = json.dumps(data)
def __str__(self) -> str:
return "SocketMessage<action={}, data={}>".format(self.action, self.data)
class SocketResponse:
def __init__(self, socket_response: SocketMessage=None, chat_response: str=None):
self.socket = socket_response
self.chat = chat_response
def __str__(self) -> str:
return "SocketResponse<socket={}, chat={}>".format(str(self.socket), self.chat)
def handle_frame(action: str, data: dict) -> SocketResponse:
"""
Handles a single received message from the websocket.
:param action: The action received from the websocket.
:param data: The deserialized data from the websocket frame.
:return: A SocketResponse for the supercaller to process.
"""
action_handlers = {
'hb': handle_heartbeat,
'97-questions-newest': handle_new_question
}
if action in action_handlers:
return action_handlers[action](data)
else:
return SocketResponse()
def handle_heartbeat(data: dict) -> SocketResponse:
return SocketResponse(socket_response=SocketMessage("hb", "hb"))
def handle_new_question(data: dict) -> SocketResponse:
"""
Processes a new question, as reported by the websocket.
:param data: The deserialized data from the websocket frame.
:return: A SocketResponse for the supercaller to process.
"""
print(data)
return SocketResponse(chat_response="New question posted: [{0}](http://english.stackexchange.com/q/{0})".format(data['id']))
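# Editor's note: a hedged sketch of how a websocket loop might dispatch a
# decoded frame through handle_frame(). The frame contents are made up.
def _example_dispatch() -> None:
    # A heartbeat frame: handle_frame routes it to handle_heartbeat, which
    # answers with a SocketMessage echoing "hb".
    response = handle_frame('hb', {})
    print(response)
    # An unrecognized action falls through to an empty SocketResponse.
    print(handle_frame('unknown-action', {}))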
| mit |
libMesh/libmesh | doc/statistics/cloc_libmesh.py | 2 | 12275 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import math
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# git checkout `git rev-list -n 1 --before="$my_date" master`
# cloc.pl src/*/*.C include/*/*.h
data = [
# 2003 - All data from archived svn repo
# '2003-01-10', 158, 29088, # SVN revision 4 - this is the first revision with trunk/libmesh
# '2003-01-20', 184, 28937, # SVN revision 11
# '2003-01-24', 198, 31158, # SVN revision 23
'2003-02-04', 198, 31344, # SVN revision 47
'2003-03-04', 243, 36036,
'2003-04-04', 269, 39946,
'2003-05-04', 275, 40941,
'2003-06-04', 310, 44090,
'2003-07-04', 319, 44445,
'2003-08-04', 322, 45225,
'2003-09-04', 325, 46762,
'2003-10-04', 327, 47151,
'2003-11-04', 327, 47152, # Up to now, all the include files were in the same directory
'2003-12-04', 327, 47184,
# 2004 - All data from archived svn repo
'2004-01-04', 339, 48437,
'2004-02-04', 343, 50455,
'2004-03-04', 347, 52198,
'2004-04-04', 358, 52515,
'2004-05-04', 358, 52653,
'2004-06-04', 369, 53953,
'2004-07-04', 368, 53981,
'2004-08-04', 371, 54316,
'2004-09-04', 371, 54510,
'2004-10-04', 375, 55785,
'2004-11-04', 375, 55880,
'2004-12-04', 384, 56612,
# 2005 - All data from archived svn repo
'2005-01-04', 385, 56674,
'2005-02-04', 406, 61034,
'2005-03-04', 406, 62423,
'2005-04-04', 403, 62595,
'2005-05-04', 412, 63540,
'2005-06-04', 416, 69619,
'2005-07-04', 425, 72092,
'2005-08-04', 425, 72445,
'2005-09-04', 429, 74148,
'2005-10-04', 429, 74263,
'2005-11-04', 429, 74486,
'2005-12-04', 429, 74629,
# 2006 - All data from archived svn repo
'2006-01-04', 429, 74161,
'2006-02-04', 429, 74165,
'2006-03-04', 429, 74170,
'2006-04-04', 429, 74864,
'2006-05-04', 433, 73847,
'2006-06-04', 438, 74681,
'2006-07-04', 454, 76954,
'2006-08-04', 454, 77464,
'2006-09-04', 454, 77843,
'2006-10-04', 454, 78051,
'2006-11-04', 463, 78683,
'2006-12-04', 463, 79057,
# 2007 - All data from archived svn repo
'2007-01-04', 463, 79149,
'2007-02-04', 475, 79344,
'2007-03-04', 479, 81416,
'2007-04-04', 479, 81468,
'2007-05-04', 481, 84312,
'2007-06-04', 481, 85565,
'2007-07-04', 482, 85924,
'2007-08-04', 485, 86248,
'2007-09-04', 487, 86481,
'2007-10-04', 497, 87926,
'2007-11-04', 502, 89687,
'2007-12-04', 512, 93523,
# 2008 - All data from archived svn repo
'2008-01-04', 512, 94263,
'2008-02-04', 515, 94557,
'2008-03-04', 526, 98127,
'2008-04-04', 526, 98256,
'2008-05-04', 531, 99715,
'2008-06-04', 531, 99963,
'2008-07-04', 538, 100839,
'2008-08-04', 542, 101682,
'2008-09-04', 548, 102163,
'2008-10-04', 556, 104185,
'2008-11-04', 558, 104535,
'2008-12-04', 565, 106318,
# 2009 - All data from archived svn repo
'2009-01-04', 565, 106340,
'2009-02-04', 579, 108431,
'2009-03-04', 584, 109050,
'2009-04-04', 584, 109922,
'2009-05-04', 589, 110821,
'2009-06-04', 591, 111094,
'2009-07-04', 591, 111571,
'2009-08-04', 591, 111555,
'2009-09-04', 591, 111746,
'2009-10-04', 591, 111920,
'2009-11-04', 595, 112993,
'2009-12-04', 597, 113744,
# 2010 - All data from archived svn repo
'2010-01-04', 598, 113840,
'2010-02-04', 600, 114378,
'2010-03-04', 602, 114981,
'2010-04-04', 603, 115509,
'2010-05-04', 603, 115821,
'2010-06-04', 603, 115875,
'2010-07-04', 627, 126159,
'2010-08-04', 627, 126217,
'2010-09-04', 628, 126078,
'2010-10-04', 642, 129417,
'2010-11-04', 643, 130045,
'2010-12-04', 648, 131363,
# 2011 - All data from archived svn repo
'2011-01-04', 648, 131644,
'2011-02-04', 648, 132105,
'2011-03-04', 658, 132950,
'2011-04-04', 661, 133643,
'2011-05-04', 650, 133958,
'2011-06-04', 662, 134447,
'2011-07-04', 667, 134938,
'2011-08-04', 679, 136338,
'2011-09-04', 684, 138165,
'2011-10-04', 686, 138627,
'2011-11-04', 690, 141876,
'2011-12-04', 690, 142096,
# 2012
'2012-01-04', 694, 142345,
'2012-02-04', 697, 142585,
'2012-03-04', 703, 146127,
'2012-04-04', 706, 147191,
'2012-05-04', 708, 148202,
'2012-06-04', 705, 148334,
'2012-07-04', 713, 150066,
'2012-08-04', 727, 152269,
'2012-09-04', 725, 152381,
'2012-10-04', 734, 155213, # cloc reports 1092 and 1094 files for Oct/Nov, Don't know what happened...
'2012-11-04', 743, 156082, # We moved from libmesh/src to src around here so maybe that caused it?
'2012-12-04', 752, 156903,
# 2013
'2013-01-04', 754, 158689, # 8f3e4977
'2013-02-04', 770, 161001, # f495444d
'2013-03-04', 776, 162239, # a7db13bb
'2013-04-04', 783, 162986, # bcb7ede1
'2013-05-04', 785, 163808, # 1f8be16b
'2013-06-04', 785, 164022, # bb96e8a6
'2013-07-04', 789, 163854, # 6651e65b
'2013-08-04', 789, 164269, # ee336c6d
'2013-09-04', 790, 165129, # bd37bb54
'2013-10-04', 790, 165447, # 36341107
'2013-11-04', 792, 166342, # eb1a1b7d
'2013-12-04', 794, 168812, # 474509c0
# 2014
'2014-01-04', 796, 170174, # f7e9b2a2
'2014-02-04', 796, 170395, # a93acc24
'2014-03-04', 799, 172037, # 799c3521
'2014-04-04', 801, 172230, # 46974589
'2014-05-04', 806, 173772, # 66d4e144
'2014-06-04', 807, 171098, # e437059d
'2014-07-04', 807, 171220, # 86e6540c
'2014-08-04', 808, 172534, # 3b5bb943
'2014-09-04', 808, 173694, # ef4465a5
'2014-10-04', 819, 175750, # eedbf7b3
'2014-11-04', 819, 176415, # c9675dcc
'2014-12-04', 819, 176277, # c3b2bc9f
# 2015
'2015-01-04', 819, 176289, # 18abbe4d
'2015-02-04', 824, 176758, # 1034fe81
'2015-03-04', 825, 176958, # 54bc2d27
'2015-04-04', 830, 176926, # c9451c01
'2015-05-04', 826, 176659, # e9e008a6
'2015-06-04', 835, 178411, # 5f771ed6
'2015-07-04', 840, 179578, # ea34669e
'2015-08-04', 844, 180953, # eb301034
'2015-09-04', 846, 181675, # ddab3b52
'2015-10-04', 849, 181196, # 6d36bc77
'2015-11-04', 848, 181385, # acc4cc5b
'2015-12-04', 849, 180331, # f434f93f
# 2016
'2016-01-04', 849, 180538, # 0de29508
'2016-02-04', 846, 182937, # 04b618f4
'2016-03-04', 846, 182727, # f63ac0b8
'2016-04-04', 849, 183261, # a59cce15
'2016-05-04', 849, 183176, # 4c78b30b
'2016-06-04', 853, 184649, # 3393e1a9
'2016-07-04', 839, 183363, # ead29425
'2016-08-04', 837, 183288, # 8406aac3
'2016-09-04', 842, 183850, # 1bf9f548
'2016-10-04', 848, 185062, # 72f8aa7d
'2016-11-04', 850, 185408, # 3a90559b
'2016-12-04', 853, 185683, # 4636ea58
# 2017
'2017-01-04', 853, 185885, # 6c7743ee
'2017-02-04', 853, 186246, # be0ecd40
'2017-03-04', 850, 184993, # 7913dc77
'2017-04-04', 856, 185905, # 1e5cb6f6
'2017-05-04', 855, 186311, # dfd89fe6
'2017-06-04', 855, 186441, # 642b81d3
'2017-07-04', 856, 186664, # 586e9751
'2017-08-04', 856, 186707, # 5a6642bf
'2017-09-04', 856, 186793, # d75605cb
'2017-10-04', 856, 187219, # b291e377
'2017-11-04', 861, 186893, # 4d08770f
'2017-12-04', 863, 187335, # 7e8c93f0
# 2018
'2018-01-04', 862, 186607, # 0a86a3c1
'2018-02-04', 862, 186902, # 158829d4
'2018-03-04', 862, 187127, # 3287318f
'2018-04-04', 862, 186557, # 0dcfe02f
'2018-05-04', 879, 186594, # ad06819b
'2018-06-04', 880, 186738, # bfa9b7a3
'2018-07-04', 882, 189018, # 92c9b163
'2018-08-04', 884, 189659, # fee809be
'2018-09-04', 884, 190046, # bd3db5ba
'2018-10-04', 886, 190239, # b7c021ef
'2018-11-04', 886, 190164, # b68a3414
'2018-12-04', 886, 190650, # 3134aa86
# 2019
'2019-01-04', 886, 191341, # 08ea2d6d
'2019-02-04', 879, 189708, # 3679dac7
'2019-03-04', 879, 190253, # 0a047066
'2019-04-04', 879, 190583, # 260d091f
'2019-05-04', 880, 192048, # c4c9fd54
'2019-06-04', 880, 192174, # 49e6d8fa
'2019-07-04', 885, 192442, # 5469d454
'2019-08-04', 886, 191947, # e3f7c8e2
'2019-09-04', 893, 194600, # 2d7cfaac
'2019-10-04', 898, 195670, # d252e82f
'2019-11-04', 899, 195840, # bd0812c7
'2019-12-04', 896, 191898, # ac649146
# 2020
'2020-01-04', 900, 192704, # 259ad8f4
'2020-02-04', 900, 193538, # 3d4ec1c6
'2020-03-04', 901, 194935, # 56ffd2f6
'2020-04-04', 904, 196199, # 9ac9b4b9
'2020-05-04', 904, 196658, # 6e32c593
'2020-06-04', 904, 197092, # f707c65a
'2020-07-04', 905, 197773, # b9d342ba
'2020-08-04', 906, 198400, # cb8514e3
'2020-09-04', 906, 198749, # 1630a53b
'2020-10-04', 907, 199497, # e9c15910
'2020-11-04', 909, 200385, # 4825db9c
'2020-12-04', 909, 200392, # 7a6d338b
# 2021
'2021-01-04', 909, 200705, # 3b410bcf
'2021-02-04', 911, 201006, # 0c89409c
'2021-03-04', 913, 201897, # d95eef7b
'2021-04-04', 913, 202506, # 5298bf63
'2021-05-04', 914, 204952, # 27b4a43d
'2021-06-04', 914, 205061, # 16dff2ff
]
# Extract the dates from the data array
date_strings = data[0::3]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%Y-%m-%d')))
# Extract number of files from data array
n_files = data[1::3]
# Extract number of lines of code from data array
n_lines = data[2::3]
# Get a reference to the figure
fig = plt.figure()
# add_subplot(111) is equivalent to Matlab's subplot(1,1,1) command.
# The colors used come from sns.color_palette("muted").as_hex(). They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
# We use the twinx() command to add a second y-axis
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
# We use the grid lines from the second axis (lines of code) as I think
# that is generally of more interest than number of files. I ran into
# an issue using axis='both', but turning on the x-grid on ax1 and the
# y-grid on ax2 seems to do the trick.
# According to this SO, we can force the grid lines to be displayed "below"
# the data using these flags, but this did not work for me.
# https://stackoverflow.com/questions/1726391/matplotlib-draw-grid-lines-behind-other-graph-elements
ax1.set_axisbelow(True)
ax2.set_axisbelow(True)
ax1.grid(b=True, axis='x', color='lightgray', linestyle='--', linewidth=1, alpha=0.25)
ax2.grid(b=True, axis='y', color='lightgray', linestyle='--', linewidth=1, alpha=0.25)
# Plot number of files vs. time
ax1.plot(date_nums, n_files, color=u'#4878cf', marker='o', linestyle='-', markersize=4, markevery=5)
ax1.set_ylabel('Files (blue circles)')
# Set up x-tick locations
ticks_names = ['2003', '2005', '2007', '2009', '2011', '2013', '2015', '2017', '2019', '2021']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime(x + '-03-04', '%Y-%m-%d')))
# Set tick labels and positions
ax1.set_xticks(tick_nums)
ax1.set_xticklabels(ticks_names)
# Plot lines of code vs. time
ax2.plot(date_nums, np.divide(n_lines, 1000.), color=u'#6acc65', marker='s', linestyle='-', markersize=4, markevery=5)
ax2.set_ylabel('Lines of code in thousands (green squares)')
# Trying to get the grid lines "under" the data using the method described here:
# https://stackoverflow.com/questions/1726391/matplotlib-draw-grid-lines-behind-other-graph-elements
# but this does not seem to have any effect no matter what number I use.
# [line.set_zorder(10) for line in ax1.lines]
# [line.set_zorder(10) for line in ax2.lines]
# Create linear curve fits of the data
files_fit = np.polyfit(date_nums, n_files, 1)
lines_fit = np.polyfit(date_nums, n_lines, 1)
# Convert to files/month
files_per_month = files_fit[0]*(365./12.)
lines_per_month = lines_fit[0]*(365./12.)
# Print curve fit data on the plot
# files_msg = 'Approx. ' + '%.1f' % files_per_month + ' files added/month'
# lines_msg = 'Approx. ' + '%.1f' % lines_per_month + ' lines added/month'
# ax1.text(date_nums[len(date_nums)/4], 300, files_msg);
# ax1.text(date_nums[len(date_nums)/4], 250, lines_msg);
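# Editor's note (hedged sketch): report the fitted growth rates on stdout
# instead of annotating the plot -- the annotation code above is left
# commented out in the original script.
print('Approx. %.1f files added/month' % files_per_month)
print('Approx. %.1f lines added/month' % lines_per_month)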
# Save as PDF
plt.savefig('cloc_libmesh.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/panel.py | 7 | 55818 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import numpy as np
import warnings
from pandas.core.dtypes.cast import (
infer_dtype_from_scalar,
maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.core.dtypes.missing import notnull
import pandas.core.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.io.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
"of\n%s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
DeprecationWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
dtype, data = infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
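# Editor's note: an illustrative, hedged call pattern for ``from_dict`` (the
# item names and frame shapes below are arbitrary examples):
#
#   >>> frames = {'ItemA': pd.DataFrame(np.random.randn(4, 3)),
#   ...           'ItemB': pd.DataFrame(np.random.randn(4, 3))}
#   >>> wp = Panel.from_dict(frames, orient='items')   # items are the dict keys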
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: the names of the two axes that remain
after slicing along ``axis`` (as compared with higher level planes);
these become the index and columns of the resulting DataFrame.
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes: the two axis Index objects that remain after
slicing along ``axis`` (as compared with higher level planes),
i.e. the axes of the resulting DataFrame.
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
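# Editor's note: a hedged usage sketch for ``to_excel`` -- each item becomes
# one sheet named after its label (the file name below is an arbitrary
# example):
#
#   >>> wp.to_excel('panel.xlsx', na_rep='NA')   # writes one sheet per item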
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
dtype, value = infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e. the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel on the major and minor axes.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
result = dict()
# the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
# doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
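# Editor's note: a hedged sketch of the migration path recommended by the
# deprecation warning in Panel.__init__ -- represent 3-D data as a DataFrame
# with a (major, minor) MultiIndex. The axis labels below are arbitrary
# examples, not part of the pandas API.
def _example_panel_to_frame():
    import numpy as np
    import pandas as pd
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        wp = Panel(np.random.randn(2, 3, 4),
                   items=['A', 'B'],
                   major_axis=pd.date_range('2000-01-01', periods=3),
                   minor_axis=['w', 'x', 'y', 'z'])
    # Columns are the items; the index is a MultiIndex of (major, minor).
    return wp.to_frame()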
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| mit |