repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
antoinearnoud/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/almost_ideal_demand_system/aids_dataframe_builder_energy.py | 4 | 13984 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 01 09:57:13 2016
@author: thomas.douenne
"""
from __future__ import division
import pandas as pd
import numpy as np
import os
import pkg_resources
from openfisca_france_indirect_taxation.examples.utils_example import get_input_data_frame
from openfisca_france_indirect_taxation.almost_ideal_demand_system.aids_price_index_builder import \
df_indice_prix_produit
from openfisca_france_indirect_taxation.almost_ideal_demand_system.utils import \
add_area_dummy, add_stalog_dummy, add_vag_dummy, electricite_only, indices_prix_carbus, price_carbu_pond
assets_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location
)
# We import the dataframe that lists the price indices. Our goal is to build a new dataframe with
# the rest of the information, i.e. consumption and other relevant household variables.
# We start by building a dataframe called data_conso gathering information on household expenditures.
data_frame_for_reg = None
data_frame_all_years = pd.DataFrame()
for year in [2000, 2005, 2011]:
aggregates_data_frame = get_input_data_frame(year)
# To estimate QAIDS, we focus on non-durable goods.
# We therefore drop durable goods from this dataframe: 442: waste collection fee; 711, 712, 713:
# vehicle purchases; 911, 912, 9122, 913, 9151: high-tech goods; 9211, 921, 923: large leisure equipment;
# 941, 960: trips, holidays and gifts; 10i0: education; 12..: personal care articles and jewellery
biens_durables = ['poste_coicop_442', 'poste_coicop_711', 'poste_coicop_712', 'poste_coicop_713',
'poste_coicop_911', 'poste_coicop_912', 'poste_coicop_9122', 'poste_coicop_913', 'poste_coicop_9151',
'poste_coicop_9211', 'poste_coicop_921', 'poste_coicop_922', 'poste_coicop_923', 'poste_coicop_960',
'poste_coicop_941', 'poste_coicop_1010', 'poste_coicop_1015', 'poste_coicop_10152', 'poste_coicop_1020',
'poste_coicop_1040', 'poste_coicop_1050', 'poste_coicop_1212', 'poste_coicop_1231', 'poste_coicop_1240',
'poste_coicop_12411', 'poste_coicop_1270']
for bien in biens_durables:
try:
aggregates_data_frame = aggregates_data_frame.drop(bien, axis = 1)
except Exception:
pass  # this durable-good column is absent from this survey year; nothing to drop
produits_alimentaire = ['poste_coicop_111', 'poste_coicop_112', 'poste_coicop_113', 'poste_coicop_114',
'poste_coicop_115', 'poste_coicop_1151', 'poste_coicop_116', 'poste_coicop_117', 'poste_coicop_118',
'poste_coicop_1181', 'poste_coicop_119', 'poste_coicop_121', 'poste_coicop_122']
energie_logement = ['poste_coicop_451', 'poste_coicop_4511', 'poste_coicop_452', 'poste_coicop_4522',
'poste_coicop_453', 'poste_coicop_454', 'poste_coicop_455', 'poste_coicop_4552']
produits = [column for column in aggregates_data_frame.columns if column[:13] == 'poste_coicop_']
del column
aggregates_data_frame['depenses_alime'] = sum(aggregates_data_frame[alime] for alime in produits_alimentaire)
aggregates_data_frame['depenses_carbu'] = aggregates_data_frame['poste_coicop_722']
aggregates_data_frame['depenses_logem'] = 0
for logem in energie_logement:
try:
aggregates_data_frame['depenses_logem'] += aggregates_data_frame[logem]
except Exception:
pass  # this housing-energy column is absent from this survey year
aggregates_data_frame['depenses_tot'] = 0
for produit in produits:
if produit[13:15] != '99' and produit[13:15] != '13':
aggregates_data_frame['depenses_tot'] += aggregates_data_frame[produit]
aggregates_data_frame['depenses_autre'] = (
aggregates_data_frame['depenses_tot'] - aggregates_data_frame['depenses_alime'] -
aggregates_data_frame['depenses_carbu'] - aggregates_data_frame['depenses_logem'])
data_conso = aggregates_data_frame[
produits + ['ident_men', 'vag', 'depenses_alime', 'depenses_autre', 'depenses_carbu', 'depenses_logem']
].copy()
# We reshape (melt) the dataframe to get one row per item consumed by each household
df = pd.melt(data_conso, id_vars = ['vag', 'ident_men'], value_vars=produits,
value_name = 'depense_bien', var_name = 'bien')
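# A minimal sketch of what the melt produces, on hypothetical values (not part of the original script):
# pd.melt(pd.DataFrame({'ident_men': [1], 'vag': [12], 'poste_coicop_111': [30.0]}),
#         id_vars = ['vag', 'ident_men'], value_vars = ['poste_coicop_111'],
#         value_name = 'depense_bien', var_name = 'bien')
# yields a single long-format row: vag=12, ident_men=1, bien='poste_coicop_111', depense_bien=30.0.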
df_indice_prix_produit = df_indice_prix_produit[['indice_prix_produit', 'prix', 'temps', 'mois']]
df['vag'] = df['vag'].astype(str)
df['indice_prix_produit'] = df['bien'] + '_' + df['vag']
# We merge the prices of the goods with the expenditures already in df. The merge is done on 'indice_prix_produit'.
# indice_prix_produit corresponds to poste_coicop_xyz_vag
df_depenses_prix = pd.merge(df, df_indice_prix_produit, on = 'indice_prix_produit')
# df_depenses_prix contains the consumption expenditures and the prices associated with them.
# We now build the categories of goods we want to compare.
df_depenses_prix['type_bien'] = 'autre'
df_depenses_prix.loc[df_depenses_prix['bien'] == 'poste_coicop_722', 'type_bien'] = 'carbu'
for alime in produits_alimentaire:
df_depenses_prix.loc[df_depenses_prix['bien'] == alime, 'type_bien'] = 'alime'
for logem in energie_logement:
df_depenses_prix.loc[df_depenses_prix['bien'] == logem, 'type_bien'] = 'logem'
del alime, logem, produit
# Build the weighted price indices for each category
df_depenses_prix[['type_bien', 'ident_men']] = df_depenses_prix[['type_bien', 'ident_men']].astype(str)
df_depenses_prix['id'] = df_depenses_prix['type_bien'] + '_' + df_depenses_prix['ident_men']
data_conso['ident_men'] = data_conso['ident_men'].astype(str)
df_depenses_prix = pd.merge(
df_depenses_prix, data_conso[['depenses_alime', 'depenses_autre', 'depenses_carbu',
'depenses_logem', 'ident_men']], on = 'ident_men'
)
del data_conso
df_depenses_prix[['depenses_alime', 'depenses_autre', 'depense_bien', 'depenses_carbu',
'depenses_logem', 'prix']] = df_depenses_prix[['depenses_alime', 'depenses_autre',
'depense_bien', 'depenses_carbu', 'depenses_logem', 'prix']].astype(float)
df_depenses_prix['part_bien_categorie'] = 0
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'alime', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_alime']
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'autre', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_autre']
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'carbu', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_carbu']
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'logem', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_logem']
df_depenses_prix.fillna(0, inplace=True)
# The shares of the goods within their category are used to build weighted price indices (cf. Lewbel)
df_depenses_prix['indice_prix_pondere'] = 0
df_depenses_prix['indice_prix_pondere'] = df_depenses_prix['part_bien_categorie'] * df_depenses_prix['prix']
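# The quantity summed below is a Lewbel-style weighted price index: for each household and category,
# sum over the goods of (share of the good in the category's spending) * (price index of the good).
# An illustration with made-up values: two food items with expenditures 30 and 10 and price indices
# 102 and 98 give (30/40) * 102 + (10/40) * 98 = 101.0.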
# grouped gives the weighted price index of each category for each household
# We reshape this dataframe so that each household has one price index per category
# This gives df_prix_to_merge
df_depenses_prix = df_depenses_prix.sort_values(by = ['id'])
grouped = df_depenses_prix['indice_prix_pondere'].groupby(df_depenses_prix['id'])
assert len(grouped) == 4 * len(aggregates_data_frame), 'There is an issue in the aggregation of prices'
grouped = grouped.aggregate(np.sum)
grouped.index.name = 'id'
grouped = grouped.reset_index()
grouped['categorie'] = grouped['id'].str[:5]
categories = ['alime', 'autre', 'carbu', 'logem']
for categorie in categories:
grouped['prix_' + categorie] = 0
grouped.loc[grouped['categorie'] == categorie, 'prix_' + categorie] = grouped['indice_prix_pondere']
grouped_alime = grouped[grouped['categorie'] == 'alime'].copy()
grouped_alime['ident_men'] = grouped_alime['id'].str[6:]
grouped_autre = grouped[grouped['categorie'] == 'autre'].copy()
grouped_autre['ident_men'] = grouped_autre['id'].str[6:]
grouped_carbu = grouped[grouped['categorie'] == 'carbu'].copy()
grouped_carbu['ident_men'] = grouped_carbu['id'].str[6:]
grouped_logem = grouped[grouped['categorie'] == 'logem'].copy()
grouped_logem['ident_men'] = grouped_logem['id'].str[6:]
df_prix_to_merge = pd.merge(grouped_carbu[['ident_men', 'prix_carbu']], grouped_alime[['ident_men'] +
['prix_alime']], on = 'ident_men')
df_prix_to_merge = pd.merge(df_prix_to_merge, grouped_autre[['ident_men', 'prix_autre']], on = 'ident_men')
df_prix_to_merge = pd.merge(df_prix_to_merge, grouped_logem[['ident_men', 'prix_logem']], on = 'ident_men')
del grouped, grouped_alime, grouped_autre, grouped_carbu, grouped_logem
# Problem: households that consume no fuel or no food are assigned a price index equal to 0. They
# are dealt with further below.
# We create a dummy variable recording whether the household consumes only electricity or also gas.
# It equals 1 if electricity only.
aggregates_data_frame = electricite_only(aggregates_data_frame)
# We collect the relevant household information, including the demographic variables
df_info_menage = aggregates_data_frame[['agepr', 'depenses_alime', 'depenses_autre', 'depenses_carbu',
'depenses_logem', 'depenses_tot', 'dip14pr', 'elect_only', 'ident_men', 'nenfants', 'nactifs', 'ocde10',
'revtot', 'situacj', 'situapr', 'stalog', 'strate', 'typmen', 'vag', 'veh_diesel',
'veh_essence']].copy()
df_info_menage['ident_men'] = df_info_menage['ident_men'].astype(str)
df_info_menage['part_alime'] = df_info_menage['depenses_alime'] / df_info_menage['depenses_tot']
df_info_menage['part_autre'] = df_info_menage['depenses_autre'] / df_info_menage['depenses_tot']
df_info_menage['part_carbu'] = df_info_menage['depenses_carbu'] / df_info_menage['depenses_tot']
df_info_menage['part_logem'] = df_info_menage['depenses_logem'] / df_info_menage['depenses_tot']
# We merge the household characteristics and their consumption with the weighted price
# indices of each category
dataframe = pd.merge(df_info_menage, df_prix_to_merge, on = 'ident_men')
del df_info_menage, df_prix_to_merge
# Households that consume no fuel are assigned the price corresponding to their survey wave
price_carbu = df_indice_prix_produit[df_indice_prix_produit['indice_prix_produit'].str[13:16] == '722'].copy()
price_carbu['vag'] = price_carbu['indice_prix_produit'].str[17:].astype(int)
price_carbu = price_carbu[['vag', 'prix']]
price_carbu['prix'] = price_carbu['prix'].astype(float)
dataframe = pd.merge(dataframe, price_carbu, on = 'vag')
del price_carbu
dataframe.loc[dataframe['prix_carbu'] == 0, 'prix_carbu'] = dataframe['prix']
dataframe['depenses_par_uc'] = dataframe['depenses_tot'] / dataframe['ocde10']
dataframe = dataframe[['ident_men', 'part_carbu', 'part_logem', 'part_alime', 'part_autre',
'prix_carbu', 'prix_logem', 'prix_alime', 'prix_autre', 'depenses_par_uc', 'depenses_tot',
'typmen', 'strate', 'dip14pr', 'agepr', 'situapr', 'situacj', 'stalog', 'nenfants',
'nactifs', 'vag', 'veh_diesel', 'veh_essence', 'elect_only']]
# We drop from the database the households for which no food consumption is observed.
# Their presence is likely to bias the analysis since, obviously, if they spend nothing on food it is
# not that they consume none, but that they bought none over the survey period (stocks, etc.)
dataframe = dataframe[dataframe['prix_alime'] != 0]
dataframe = dataframe[dataframe['prix_logem'] != 0]
# We remove the outliers, defined as households spending more than 25% of their budget on fuel.
# This amounts to 16 and 13 households for 2000 and 2005, which is negligible, but to 153, i.e. 2% of
# consumers, for 2011, which is sizeable. The difference is explained by the survey duration (1 week in 2011)
dataframe = dataframe[dataframe['part_carbu'] < 0.25]
indices_prix_carburants = indices_prix_carbus(year)
dataframe = pd.merge(dataframe, indices_prix_carburants, on = 'vag')
dataframe = price_carbu_pond(dataframe)
dataframe['year'] = year
dataframe = add_area_dummy(dataframe)
dataframe = add_stalog_dummy(dataframe)
dataframe = add_vag_dummy(dataframe)
data_frame_for_reg = dataframe.rename(columns = {'part_carbu': 'w1', 'part_logem': 'w2', 'part_alime': 'w3',
'part_autre': 'w4', 'prix_carbu': 'p1', 'prix_logem': 'p2', 'prix_alime': 'p3', 'prix_autre': 'p4'})
data_frame_all_years = pd.concat([data_frame_all_years, data_frame_for_reg])
data_frame_all_years.fillna(0, inplace = True)
data_frame_for_reg.to_csv(os.path.join(assets_directory, 'openfisca_france_indirect_taxation', 'assets',
'quaids', 'data_frame_energy_{}.csv'.format(year)), sep = ',')
data_frame_all_years.to_csv(os.path.join(assets_directory, 'openfisca_france_indirect_taxation', 'assets',
'quaids', 'data_frame_energy_all_years.csv'), sep = ',')
# TODO: remove what is unused and improve demographics: dip14
# dip14: use only dip14pr (a good proxy for dip14cj anyway), but change the nomenclature to have just 2 or 3
# dummies describing whether they attended college or not, etc.
# Use more functions in utils
| agpl-3.0 |
rrohan/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures illustrate how a point cloud
can be very flat in one direction, which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
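# Because z has a much smaller scale than x and y, c = a - b + z stays close to a linear combination
# of a and b: the resulting cloud is "very flat in one direction", as described in the docstring.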
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
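# Note that the variance-scaled axes computed just above are immediately overwritten: the axes are
# finally drawn with a fixed length of 3 along each principal direction (3 * V.T).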
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
huzq/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 18 | 2530 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example, if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate was too high.
This example shows how to plot some of the first layer weights in an
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance. The example will throw a warning because it
doesn't converge; in this case this is what we want because of CI's time
constraints.
"""
import warnings
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X = X / 255.
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, random_state=1,
learning_rate_init=.1)
# this example won't converge because of CI's time constraints, so we catch the
# warning and ignore it here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ConvergenceWarning,
module="sklearn")
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
deepmind/grid-cells | train.py | 1 | 10221 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Supervised training for the Grid cell network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
import numpy as np
import tensorflow as tf
import Tkinter # pylint: disable=unused-import
matplotlib.use('Agg')
import dataset_reader # pylint: disable=g-bad-import-order, g-import-not-at-top
import model # pylint: disable=g-bad-import-order
import scores # pylint: disable=g-bad-import-order
import utils # pylint: disable=g-bad-import-order
# Task config
tf.flags.DEFINE_string('task_dataset_info', 'square_room',
'Name of the room in which the experiment is performed.')
tf.flags.DEFINE_string('task_root',
None,
'Dataset path.')
tf.flags.DEFINE_float('task_env_size', 2.2,
'Environment size (meters).')
tf.flags.DEFINE_list('task_n_pc', [256],
'Number of target place cells.')
tf.flags.DEFINE_list('task_pc_scale', [0.01],
'Place cell standard deviation parameter (meters).')
tf.flags.DEFINE_list('task_n_hdc', [12],
'Number of target head direction cells.')
tf.flags.DEFINE_list('task_hdc_concentration', [20.],
'Head direction concentration parameter.')
tf.flags.DEFINE_integer('task_neurons_seed', 8341,
'Seeds.')
tf.flags.DEFINE_string('task_targets_type', 'softmax',
'Type of target, soft or hard.')
tf.flags.DEFINE_string('task_lstm_init_type', 'softmax',
'Type of LSTM initialisation, soft or hard.')
tf.flags.DEFINE_bool('task_velocity_inputs', True,
'Input velocity.')
tf.flags.DEFINE_list('task_velocity_noise', [0.0, 0.0, 0.0],
'Add noise to velocity.')
# Model config
tf.flags.DEFINE_integer('model_nh_lstm', 128, 'Number of hidden units in LSTM.')
tf.flags.DEFINE_integer('model_nh_bottleneck', 256,
'Number of hidden units in linear bottleneck.')
tf.flags.DEFINE_list('model_dropout_rates', [0.5],
'List of floats with dropout rates.')
tf.flags.DEFINE_float('model_weight_decay', 1e-5,
'Weight decay regularisation')
tf.flags.DEFINE_bool('model_bottleneck_has_bias', False,
'Whether to include a bias in linear bottleneck')
tf.flags.DEFINE_float('model_init_weight_disp', 0.0,
'Initial weight displacement.')
# Training config
tf.flags.DEFINE_integer('training_epochs', 1000, 'Number of training epochs.')
tf.flags.DEFINE_integer('training_steps_per_epoch', 1000,
'Number of optimization steps per epoch.')
tf.flags.DEFINE_integer('training_minibatch_size', 10,
'Size of the training minibatch.')
tf.flags.DEFINE_integer('training_evaluation_minibatch_size', 4000,
'Size of the minibatch during evaluation.')
tf.flags.DEFINE_string('training_clipping_function', 'utils.clip_all_gradients',
'Function for gradient clipping.')
tf.flags.DEFINE_float('training_clipping', 1e-5,
'The absolute value to clip by.')
tf.flags.DEFINE_string('training_optimizer_class', 'tf.train.RMSPropOptimizer',
'The optimizer used for training.')
tf.flags.DEFINE_string('training_optimizer_options',
'{"learning_rate": 1e-5, "momentum": 0.9}',
'Defines a dict with opts passed to the optimizer.')
# Store
tf.flags.DEFINE_string('saver_results_directory',
None,
'Path to directory for saving results.')
tf.flags.DEFINE_integer('saver_eval_time', 2,
'Frequency at which results are saved.')
# Require flags
tf.flags.mark_flag_as_required('task_root')
tf.flags.mark_flag_as_required('saver_results_directory')
FLAGS = tf.flags.FLAGS
def train():
"""Training loop."""
tf.reset_default_graph()
# Create the motion models for training and evaluation
data_reader = dataset_reader.DataReader(
FLAGS.task_dataset_info, root=FLAGS.task_root, num_threads=4)
train_traj = data_reader.read(batch_size=FLAGS.training_minibatch_size)
# Create the ensembles that provide targets during training
place_cell_ensembles = utils.get_place_cell_ensembles(
env_size=FLAGS.task_env_size,
neurons_seed=FLAGS.task_neurons_seed,
targets_type=FLAGS.task_targets_type,
lstm_init_type=FLAGS.task_lstm_init_type,
n_pc=FLAGS.task_n_pc,
pc_scale=FLAGS.task_pc_scale)
head_direction_ensembles = utils.get_head_direction_ensembles(
neurons_seed=FLAGS.task_neurons_seed,
targets_type=FLAGS.task_targets_type,
lstm_init_type=FLAGS.task_lstm_init_type,
n_hdc=FLAGS.task_n_hdc,
hdc_concentration=FLAGS.task_hdc_concentration)
target_ensembles = place_cell_ensembles + head_direction_ensembles
# Model creation
rnn_core = model.GridCellsRNNCell(
target_ensembles=target_ensembles,
nh_lstm=FLAGS.model_nh_lstm,
nh_bottleneck=FLAGS.model_nh_bottleneck,
dropoutrates_bottleneck=np.array(FLAGS.model_dropout_rates),
bottleneck_weight_decay=FLAGS.model_weight_decay,
bottleneck_has_bias=FLAGS.model_bottleneck_has_bias,
init_weight_disp=FLAGS.model_init_weight_disp)
rnn = model.GridCellsRNN(rnn_core, FLAGS.model_nh_lstm)
# Get a trajectory batch
input_tensors = []
init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
if FLAGS.task_velocity_inputs:
# Add the required amount of noise to the velocities
vel_noise = tf.distributions.Normal(0.0, 1.0).sample(
sample_shape=ego_vel.get_shape()) * FLAGS.task_velocity_noise
input_tensors = [ego_vel + vel_noise] + input_tensors
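# With the default task_velocity_noise of [0.0, 0.0, 0.0], the sampled noise is scaled to zero and
# the network input reduces to the raw egocentric velocities.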
# Concatenate all inputs
inputs = tf.concat(input_tensors, axis=2)
# Replace euclidean positions and angles by encoding of place and hd ensembles
# Note that the initial_conds will be zeros if the ensembles were configured
# to provide that type of initialization
initial_conds = utils.encode_initial_conditions(
init_pos, init_hd, place_cell_ensembles, head_direction_ensembles)
# Encode targets as well
ensembles_targets = utils.encode_targets(
target_pos, target_hd, place_cell_ensembles, head_direction_ensembles)
# Estimate future encoding of place and hd ensembles inputting egocentric vels
outputs, _ = rnn(initial_conds, inputs, training=True)
ensembles_logits, bottleneck, lstm_output = outputs
# Training loss
pc_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=ensembles_targets[0], logits=ensembles_logits[0], name='pc_loss')
hd_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=ensembles_targets[1], logits=ensembles_logits[1], name='hd_loss')
total_loss = pc_loss + hd_loss
train_loss = tf.reduce_mean(total_loss, name='train_loss')
# Optimisation ops
optimizer_class = eval(FLAGS.training_optimizer_class) # pylint: disable=eval-used
optimizer = optimizer_class(**eval(FLAGS.training_optimizer_options)) # pylint: disable=eval-used
grad = optimizer.compute_gradients(train_loss)
clip_gradient = eval(FLAGS.training_clipping_function) # pylint: disable=eval-used
clipped_grad = [
clip_gradient(g, var, FLAGS.training_clipping) for g, var in grad
]
train_op = optimizer.apply_gradients(clipped_grad)
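# For reference, with the default flag values defined above the two eval() calls resolve to
# optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-5, momentum=0.9) and
# clip_gradient = utils.clip_all_gradients, applied to each (gradient, variable) pair with
# the clipping value FLAGS.training_clipping.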
# Store the grid scores
grid_scores = dict()
grid_scores['btln_60'] = np.zeros((FLAGS.model_nh_bottleneck,))
grid_scores['btln_90'] = np.zeros((FLAGS.model_nh_bottleneck,))
grid_scores['btln_60_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
grid_scores['btln_90_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
grid_scores['lstm_60'] = np.zeros((FLAGS.model_nh_lstm,))
grid_scores['lstm_90'] = np.zeros((FLAGS.model_nh_lstm,))
# Create scorer objects
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
masks_parameters)
with tf.train.SingularMonitoredSession() as sess:
for epoch in range(FLAGS.training_epochs):
loss_acc = list()
for _ in range(FLAGS.training_steps_per_epoch):
res = sess.run({'train_op': train_op, 'total_loss': train_loss})
loss_acc.append(res['total_loss'])
tf.logging.info('Epoch %i, mean loss %.5f, std loss %.5f', epoch,
np.mean(loss_acc), np.std(loss_acc))
if epoch % FLAGS.saver_eval_time == 0:
res = dict()
for _ in xrange(FLAGS.training_evaluation_minibatch_size //
FLAGS.training_minibatch_size):
mb_res = sess.run({
'bottleneck': bottleneck,
'lstm': lstm_output,
'pos_xy': target_pos
})
res = utils.concat_dict(res, mb_res)
# Store at the end of validation
filename = 'rates_and_sac_latest_hd.pdf'
grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
'btln_60_separation'], grid_scores[
'btln_90_separation'] = utils.get_scores_and_plot(
latest_epoch_scorer, res['pos_xy'], res['bottleneck'],
FLAGS.saver_results_directory, filename)
def main(unused_argv):
tf.logging.set_verbosity(3) # Print INFO log messages.
train()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/sparse/test_series.py | 7 | 51517 | # pylint: disable-msg=E1101,W0612
import operator
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range
from pandas.core.common import isnull
from pandas.tseries.offsets import BDay
import pandas.util.testing as tm
from pandas.compat import range
from pandas import compat
from pandas.core.reshape.util import cartesian_product
import pandas.core.sparse.frame as spf
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.core.sparse.api import SparseSeries
from pandas.tests.series.test_api import SharedWithSparse
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
class TestSparseSeries(SharedWithSparse):
def setup_method(self, method):
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer',
name='iseries')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_constructor_dtype(self):
arr = SparseSeries([np.nan, 1, 2, np.nan])
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == np.float64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
assert arr.dtype == np.int64
assert np.isnan(arr.fill_value)
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
tm.assert_sp_series_equal(df['col'], self.bseries, check_names=False)
result = df.iloc[:, 0]
tm.assert_sp_series_equal(result, self.bseries, check_names=False)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
tm.assert_series_equal(expected, result)
def test_constructor_preserve_attr(self):
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == np.int64
assert arr.fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == np.int64
assert s.fill_value == 0
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
assert density == 4 / 10.0
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='bseries'))
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
series = self.bseries.to_dense(sparse_only=True)
indexer = np.isfinite(arr)
exp = Series(arr[indexer], index=index[indexer], name='bseries')
tm.assert_series_equal(series, exp)
series = self.iseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='iseries'))
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='zbseries'))
series = self.ziseries.to_dense()
tm.assert_series_equal(series, Series(arr))
def test_to_dense_fill_value(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([1, np.nan, 0, 3, 0])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
tm.assert_sp_series_equal(bseries, self.bseries)
tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
assert iseries.name == self.bseries.name
assert len(series) == len(bseries)
assert len(series) == len(iseries)
assert series.shape == bseries.shape
assert series.shape == iseries.shape
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
tm.assert_sp_series_equal(zbseries, self.zbseries)
tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
assert ziseries.name == self.zbseries.name
assert len(series) == len(zbseries)
assert len(series) == len(ziseries)
assert series.shape == zbseries.shape
assert series.shape == ziseries.shape
def test_to_dense_preserve_name(self):
assert (self.bseries.name is not None)
result = self.bseries.to_dense()
assert result.name == self.bseries.name
def test_constructor(self):
# test setup guys
assert np.isnan(self.bseries.fill_value)
assert isinstance(self.bseries.sp_index, BlockIndex)
assert np.isnan(self.iseries.fill_value)
assert isinstance(self.iseries.sp_index, IntIndex)
assert self.zbseries.fill_value == 0
tm.assert_numpy_array_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
def _check_const(sparse, name):
# use passed series name
result = SparseSeries(sparse)
tm.assert_sp_series_equal(result, sparse)
assert sparse.name == name
assert result.name == name
# use passed name
result = SparseSeries(sparse, name='x')
tm.assert_sp_series_equal(result, sparse, check_names=False)
assert result.name == 'x'
_check_const(self.bseries, 'bseries')
_check_const(self.iseries, 'iseries')
_check_const(self.zbseries, 'zbseries')
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
assert isinstance(s5, SparseSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
assert (sp.loc[:99] == data).all()
assert isnull(sp.loc[100:]).all()
data = np.nan
sp = SparseSeries(data, np.arange(100))
assert len(sp) == 100
assert sp.shape == (100, )
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
tm.assert_numpy_array_equal(sp_series.values.values, np.array(arr))
assert len(sp_series) == 5
assert sp_series.shape == (5, )
def test_constructor_empty(self):
# see gh-9272
sp = SparseSeries()
assert len(sp.index) == 0
assert sp.shape == (0, )
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
assert cop is not self.bseries
assert cop.sp_index is self.bseries.sp_index
assert cop.dtype == np.float64
cop2 = self.iseries.copy()
tm.assert_sp_series_equal(cop, self.bseries)
tm.assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
assert cop.sp_values[0] == 97
assert self.bseries.sp_values[0] != 97
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
tm.assert_sp_series_equal(zbcop, self.zbseries)
tm.assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
assert (self.bseries.sp_values[:5] == 5).all()
def test_shape(self):
# see gh-10452
assert self.bseries.shape == (20, )
assert self.btseries.shape == (20, )
assert self.iseries.shape == (20, )
assert self.bseries2.shape == (15, )
assert self.iseries2.shape == (15, )
assert self.zbseries2.shape == (15, )
assert self.ziseries2.shape == (15, )
def test_astype(self):
with pytest.raises(ValueError):
self.bseries.astype(np.int64)
def test_astype_all(self):
orig = pd.Series(np.array([1, 2, 3]))
s = SparseSeries(orig)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = s.astype(typ)
assert res.dtype == typ
tm.assert_series_equal(res.to_dense(), orig.astype(typ))
def test_kind(self):
assert self.bseries.kind == 'block'
assert self.iseries.kind == 'integer'
def test_to_frame(self):
# GH 9850
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},
default_fill_value=0)
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)
def test_pickle(self):
def _test_roundtrip(series):
unpickled = tm.round_trip_pickle(series)
tm.assert_sp_series_equal(series, unpickled)
tm.assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
tm.assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
tm.assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
pytest.raises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
pytest.raises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
assert self.bseries.get(len(self.bseries) + 1) is None
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
tm.assert_almost_equal(result, expected)
tm.assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
self.btseries.set_value(idx, 0)
assert self.btseries[idx] == 0
self.iseries.set_value('foobar', 0)
assert self.iseries.index[-1] == 'foobar'
assert self.iseries['foobar'] == 0
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
assert isinstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
tm.assert_sp_series_equal(res, expected)
res = self.bseries[:5]
assert isinstance(res, SparseSeries)
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
assert isinstance(sparse_result, SparseSeries)
tm.assert_almost_equal(dense_result,
sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
pytest.raises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
sp = SparseSeries(np.ones(10) * nan)
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp)
def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
np.take(sp.to_dense(), indices, axis=0))
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, out=np.empty(sp.shape))
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, mode='clip')
def test_setitem(self):
self.bseries[5] = 7.
assert self.bseries[5] == 7.
def test_setslice(self):
self.bseries[5:10] = 7.
tm.assert_series_equal(self.bseries[5:10].to_dense(),
Series(7., index=range(5, 10),
name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
tm.assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
tm.assert_sp_series_equal(result, self.bseries + self.bseries)
def test_binary_operators(self):
# skipping for now #####
import pytest
pytest.skip("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
tm.assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, "i%s" % op),
getattr(operator, op))
def test_abs(self):
s = SparseSeries([1, 2, -3], name='x')
expected = SparseSeries([1, 2, 3], name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')
expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,
fill_value=2, name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
tm.assert_sp_series_equal(spsre, seriesre)
tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
tm.assert_sp_series_equal(self.bseries, same_index)
assert same_index is not self.bseries
# corner cases
sp = SparseSeries([], index=[])
# TODO: sp_zero is not used anywhere...remove?
sp_zero = SparseSeries([], index=[], fill_value=0) # noqa
_compare_with_series(sp, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
assert (self.bseries.sp_values != 1.).all()
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
assert reindexed.sp_index is index2
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
tm.assert_almost_equal(expected.values, reindexed.sp_values)
# make sure level argument asserts
# TODO: expected is not used anywhere...remove?
expected = expected.reindex(int_indices2).fillna(fill_value) # noqa
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1,
sparse_index=IntIndex(length, index1),
fill_value=nan)
with tm.assert_raises_regex(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0) # noqa
def test_repr(self):
# TODO: These aren't used
bsrepr = repr(self.bseries) # noqa
isrepr = repr(self.iseries) # noqa
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
assert sparse_result == dense_result
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')
tm.assert_sp_array_equal(sp_valid.values, exp_arr)
tm.assert_index_equal(sp_valid.index, expected.index)
assert len(sp_valid.sp_values) == 2
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
assert not isinstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx, fill_value=np.nan)
# homogenized is only valid with NaN fill values
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert (v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
with tm.assert_raises_regex(TypeError, "NaN fill value"):
spf.homogenize(data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
assert np.isnan(result.fill_value)
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
# 1 / 0 is inf
assert np.isinf(result.fill_value)
def test_fill_value_when_combine_const(self):
# GH12723
s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))
shifted = series.shift(0)
assert shifted is not series
tm.assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=BDay())
_dense_series_compare(series, f)
def test_shift_nan(self):
# GH 12908
orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=0))
def test_shift_dtype(self):
# GH 12908
orig = pd.Series([1, 2, 3, 4], dtype=np.int64)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
sparse = orig.to_sparse(fill_value=np.nan)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=np.nan))
# shift(1) or more span changes dtype to float64
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
def test_shift_dtype_fill_value(self):
# GH 12908
orig = pd.Series([1, 0, 0, 4], dtype=np.int64)
for v in [0, 1, np.nan]:
sparse = orig.to_sparse(fill_value=v)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=v))
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
tm.assert_sp_series_equal(result, result2)
tm.assert_sp_series_equal(result, expected)
class TestSparseHandlingMultiIndexes(object):
def setup_method(self, method):
miindex = pd.MultiIndex.from_product(
[["x", "y"], ["10", "20"]], names=['row-foo', 'row-bar'])
micol = pd.MultiIndex.from_product(
[['a', 'b', 'c'], ["1", "2"]], names=['col-foo', 'col-bar'])
dense_multiindex_frame = pd.DataFrame(
index=miindex, columns=micol).sort_index().sort_index(axis=1)
self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)
def test_to_sparse_preserve_multiindex_names_columns(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
sparse_multiindex_frame = sparse_multiindex_frame.copy()
tm.assert_index_equal(sparse_multiindex_frame.columns,
self.dense_multiindex_frame.columns)
def test_round_trip_preserve_multiindex_names(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()
tm.assert_frame_equal(self.dense_multiindex_frame,
round_trip_multiindex_frame,
check_column_type=True,
check_names=True)
class TestSparseSeriesScipyInteraction(object):
# Issue 8048: add SparseSeries coo methods
def setup_method(self, method):
tm._skip_if_no_scipy()
import scipy.sparse
# SparseSeries inputs used in tests, the tests rely on the order
self.sparse_series = []
s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
self.sparse_series.append(s.to_sparse())
ss = self.sparse_series[0].copy()
ss.index.names = [3, 0, 1, 2]
self.sparse_series.append(ss)
ss = pd.Series([
nan
] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()
for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
ss[k] = v
self.sparse_series.append(ss)
# results used in tests
self.coo_matrices = []
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))
self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],
[(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]
self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]
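# For reference, a sketch of the mapping exercised by the tests below (values taken from the
# fixtures above): self.sparse_series[0].to_coo(row_levels=['A', 'B'], column_levels=['C', 'D'])
# returns (coo_matrix, rows, columns) where rows == [(1, 2), (1, 1), (2, 1)] and
# columns == [('a', 0), ('a', 1), ('b', 0), ('b', 1)].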
def test_to_coo_text_names_integer_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_integer_row_levels_sort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1],
'column_levels': [2, 3],
'sort_labels': True}
result = (self.coo_matrices[1], self.ils[1], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B', 'C'],
'column_levels': ['D'],
'sort_labels': False}
result = (self.coo_matrices[2], self.ils[2], self.jls[1])
self._run_test(ss, kwargs, result)
def test_to_coo_integer_names_integer_row_levels_nosort(self):
ss = self.sparse_series[1]
kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
pytest.raises(ValueError, ss.to_coo)
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat([self.sparse_series[0],
self.sparse_series[0]]).to_sparse()
pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
check = self.sparse_series[2]
tm.assert_sp_series_equal(ss, check)
def test_from_coo_nodense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
check = self.sparse_series[2]
check = check.dropna().to_sparse()
tm.assert_sp_series_equal(ss, check)
def test_from_coo_long_repr(self):
# GH 13114
# test it doesn't raise error. Formatting is tested in test_format
tm._skip_if_no_scipy()
import scipy.sparse
sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))
repr(sparse)
def _run_test(self, ss, kwargs, check):
results = ss.to_coo(**kwargs)
self._check_results_to_coo(results, check)
# for every test, also test symmetry property (transpose), switch
# row_levels and column_levels
d = kwargs.copy()
d['row_levels'] = kwargs['column_levels']
d['column_levels'] = kwargs['row_levels']
results = ss.to_coo(**d)
results = (results[0].T, results[2], results[1])
self._check_results_to_coo(results, check)
def _check_results_to_coo(self, results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
tm.assert_numpy_array_equal(A.todense(), A_result.todense())
# or compare directly as difference of sparse
# assert(abs(A - A_result).max() < 1e-12) # max is failing in python
# 2.6
assert il == il_result
assert jl == jl_result
def test_concat(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind='integer')
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind='block', fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_sparse_dense(self):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_value_counts(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_dup(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
# numeric op may cause sp_values to include the same value as
# fill_value
dense = pd.Series(vals, name='xx') / 0.
sparse = pd.SparseSeries(vals, name='xx') / 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx') * 0.
sparse = pd.SparseSeries(vals, name='xx') * 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_int(self):
vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
# fill_value is np.nan, but should not be included in the result
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_isnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnull()
exp = pd.SparseSeries([True, True, False, False, True], name='xxx',
fill_value=True)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnull()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([True, False, False, False, False], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def test_isnotnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnotnull()
exp = pd.SparseSeries([False, False, True, True, False], name='xxx',
fill_value=False)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnotnull()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([False, True, True, True, True], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def _dense_series_compare(s, f):
result = f(s)
assert (isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
tm.assert_series_equal(result.to_dense(), dense_result)
class TestSparseSeriesAnalytics(object):
def setup_method(self, method):
arr, index = _test_data1()
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
def test_cumsum(self):
result = self.bseries.cumsum()
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
axis = 1 # Series is 1-D, so only axis = 0 is valid.
msg = "No axis named {axis}".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
self.bseries.cumsum(axis=axis)
def test_numpy_cumsum(self):
result = np.cumsum(self.bseries)
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = np.cumsum(self.zbseries)
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.bseries, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.zbseries, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
# numpy passes in 'axis=None' or `axis=-1'
funcs = ['sum', 'cumsum', 'var', 'mean',
'prod', 'cumprod', 'std', 'argsort',
'argmin', 'argmax', 'min', 'max']
for func in funcs:
for series in ('bseries', 'zbseries'):
getattr(np, func)(getattr(self, series))
| mit |
magic-sph/pizza | python/pizza/movie.py | 1 | 5544 | # -*- coding: utf-8 -*-
import os
import numpy as np
from .plotlib import equatContour
from .libpizza import spec_spat, scanDir, symmetrize
from .frame import Frame
import matplotlib.pyplot as plt
class PizzaMovie:
def __init__(self, field='up', nvar='all', datadir='.', tag=None, step=1,
endian='l', levels=64, cmap='seismic', deminc=True, png=False,
cut=1., bgcolor=None, dpi=100, normed=False, rm_hor_avg=False):
if png:
plt.ioff()
if not os.path.exists('movie'):
os.mkdir('movie')
else:
plt.ion()
if field in ('om', 'vortz', 'vort', 'omega', 'Vorticity', 'Omega'):
st = 'om'
elif field in ('temperature', 'Temperature', 'temp', 'Temp', 't', 'T'):
st = 'temp'
elif field in ('us', 'Us', 'ur', 'Ur', 'vs', 'Vs', 'Vr', 'vr'):
st = 'us'
elif field in ('up', 'Up', 'uphi', 'Uphi', 'vp', 'Vp', 'Vphi', 'vphi'):
st = 'up'
# Determine the file name pattern
if tag is not None:
pattern = 'frame_%s_*.%s' % (st, tag)
else:
pattern = 'frame_%s_*' % st
pattern = os.path.join(datadir, pattern)
# Assemble a list of files that match the pattern
files = scanDir(pattern)
# Only every steps
files = files[::step]
# Only the last nvar files
if type(nvar) == int:
if nvar <= len(files):
files = files[-nvar:]
for k, file in enumerate(files):
if png:
filename = 'movie/img%05d.png' % k
if os.path.exists(filename) and k != 0:
continue
f = Frame(file, endian=endian)
dat = spec_spat(f.field_m, f.n_phi_max)
if deminc:
dat = symmetrize(dat, ms=f.minc)
if rm_hor_avg:
dat = dat - np.mean(dat, axis=0)
if k == 0:
self.ra = f.ra
self.ek = f.ek
self.pr = f.pr
self.radratio = f.radratio
self.sc = f.sc
self.raxi = f.raxi
self.n_r_max = f.n_r_max
self.n_m_max = f.n_m_max
self.m_max = f.m_max
self.n_phi_max = f.n_phi_max
self.minc = f.minc
self.radius = f.radius
self.tcond = f.tcond
self.idx2m = f.idx2m
if deminc:
phi = np.linspace(0., 2.*np.pi, dat.shape[0])
else:
phi = np.linspace(0., 2.*np.pi/self.minc, dat.shape[0])
vmin = -max(abs(dat.max()), abs(dat.min()))
vmin = cut * vmin
vmax = -vmin
cs = np.linspace(vmin, vmax, levels)
fig, xx, yy = equatContour(dat, self.radius, minc=self.minc,
levels=levels, cm=cmap, vmin=vmin,
vmax=vmax, deminc=deminc,
cbar=False)
man = plt.get_current_fig_manager()
man.canvas.draw()
else:
if not png:
print(k)
plt.cla()
if normed:
vmin = -max(abs(dat.max()), abs(dat.min()))
vmin = cut * vmin
vmax = -vmin
cs = np.linspace(vmin, vmax, levels)
ax = fig.axes[0]
ax.contourf(xx, yy, dat, cs, cmap=plt.get_cmap(cmap),
extend='both')
ax.plot(self.radius[0]*np.cos(phi), self.radius[0]*np.sin(phi),
'k-')
ax.plot(self.radius[-1]*np.cos(phi),
self.radius[-1]*np.sin(phi), 'k-')
if not deminc and self.minc > 1:
ax.plot(self.radius, np.zeros_like(self.radius), 'k-',
lw=1.5)
xa = self.radius[-1]*np.cos(2.*np.pi/self.minc)
ya = self.radius[-1]*np.sin(2.*np.pi/self.minc)
xb = self.radius[0]*np.cos(2.*np.pi/self.minc)
x = np.linspace(xa, xb, 32)
y = np.tan(2.*np.pi/self.minc)*(x-xa)+ya
ax.plot(x, y, 'k-', lw=1.5)
ax.plot(self.radius, np.zeros_like(self.radius), 'k-',
lw=1.5)
ax.axis('off')
if xx.min() < 0:
ax.set_xlim(1.01*xx.min(), 1.01*xx.max())
elif xx.min() == 0.:
ax.set_xlim(xx.min()-0.01, 1.01*xx.max())
else:
ax.set_xlim(0.99*xx.min(), 1.01*xx.max())
if yy.min() < 0:
ax.set_ylim(1.01*yy.min(), 1.01*yy.max())
elif yy.min() == 0.:
ax.set_ylim(yy.min()-0.01, 1.01*yy.max())
else:
ax.set_ylim(0.99*yy.min(), 1.01*yy.max())
man.canvas.draw()
if png:
filename = 'movie/img%05d.png' % k
print('write %s' % filename)
# st = 'echo %i' % ivar + ' > movie/imgmax'
if bgcolor is not None:
fig.savefig(filename, facecolor=bgcolor, dpi=dpi)
else:
fig.savefig(filename, dpi=dpi)
| gpl-3.0 |
manashmndl/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
ToFuProject/tofu | tofu/geom/_comp.py | 1 | 44923 | """
This module is the computational part of the geometrical module of ToFu
"""
# Built-in
import os
import warnings
from xml.dom import minidom
# Common
import numpy as np
import scipy.interpolate as scpinterp
import scipy.integrate as scpintg
from inspect import signature as insp
# ToFu-specific
try:
import tofu.geom._def as _def
import tofu.geom._GG as _GG
except Exception:
from . import _def as _def
from . import _GG as _GG
_LTYPES = [int, float, np.int_, np.float_]
_RES = 0.1
###############################################################################
# Default parameters
###############################################################################
_SAMPLE_RES = {
'edge': 0.02,
'cross': 0.1,
'surface': 0.1,
'volume': 0.1,
}
_SAMPLE_RESMODE = {
'edge': 'abs',
'cross': 'abs',
'surface': 'abs',
'volume': 'abs',
}
def _check_float(var=None, varname=None, vardef=None):
if var is None:
var = vardef
if not type(var) in _LTYPES:
msg = (
"Arg {} must be a float!\n".format(varname)
+ "Provided: {}".format(type(var))
)
raise Exception(msg)
return var
###############################################################################
# Ves functions
###############################################################################
# ==============================================================================
# Interfacing functions
# ==============================================================================
def _get_pts_from_path_svg(
path_str=None,
res=None,
):
# Check inputs
res = _check_float(var=res, varname='res', vardef=_RES)
# try loading
try:
from svg.path import parse_path
except Exception as err:
msg = (
str(err)
+ "\n\nYou do not seem to have svg.path installed\n"
+ "It is an optional dependency only used for this method\n"
+ "To use from_svg(), please install svg.path using:\n"
+ "\tpip install svg.path"
)
raise Exception(msg)
lpath = parse_path(path_str)
lpath._calc_lengths()
fract = lpath._fractions
pos = []
for ii, pat in enumerate(lpath):
if pat.__class__.__name__ == 'Line':
pos.append(np.r_[fract[ii]])
elif pat.__class__.__name__ == 'Move':
pos.append(np.r_[fract[ii]])
elif pat.__class__.__name__ == 'Close':
pos.append(np.r_[fract[ii]])
else:
npts = int(np.ceil(pat.length() / res))
pos.append(
np.linspace(fract[ii], fract[ii+1], npts, endpoint=False)
)
pos = np.unique(np.concatenate(pos))
ind1 = np.abs(pos-1.) < 1e-14
if np.sum(ind1) == 1:
pos[ind1] = 1.
elif np.sum(ind1) > 1:
        msg = "Several path positions are equal to 1!"
raise Exception(msg)
pts = np.array([lpath.point(po) for po in pos])
pts = np.array([pts.real, pts.imag])
# Check for reference line
isref = False
if 'z' not in path_str.lower():
if pts.shape[1] == 2:
isref = True
else:
            msg = (
                "Non-conform path ({}) identified!\n"
                + "All paths must be either:\n"
                + "\t- closed\n"
                + "\t- or a unique straight line with 2 points\n"
            ).format(path_str)
            raise Exception(msg)
return pts, isref
def get_paths_from_svg(
pfe=None,
res=None,
r0=None,
z0=None,
point_ref1=None,
point_ref2=None,
length_ref=None,
scale=None,
verb=None,
):
# check input
c0 = isinstance(pfe, str) and os.path.isfile(pfe) and pfe.endswith('.svg')
if not c0:
msg = (
"Arg pfe should be a path to a valid .svg file!\n"
+ "Provided:\n\t{}".format(pfe)
)
raise Exception(msg)
pfe = os.path.abspath(pfe)
# r0, z0, scale
z0 = _check_float(var=z0, varname='z0', vardef=0.)
r0 = _check_float(var=r0, varname='r0', vardef=0.)
scale = _check_float(var=scale, varname='scale', vardef=1.)
# verb
if verb is None:
verb = True
if not isinstance(verb, bool):
msg = (
"Arg verb must be a bool!\n"
+ "Provided:\n\t{}".format(verb)
)
raise Exception(msg)
# Predefine useful var
doc = minidom.parse(pfe)
# Try extract raw data
try:
dpath = {
path.getAttribute('id').replace('\n', '').replace('""', ''): {
'poly': path.getAttribute('d'),
'color': path.getAttribute('style')
}
for path in doc.getElementsByTagName('path')
}
except Exception as err:
msg = (
"Could not extract path coordinates from {}".format(pfe)
)
raise Exception(msg)
# Derive usable data
kstr = 'fill:'
lk = list(dpath.keys())
ref = None
for ii, k0 in enumerate(lk):
v0 = dpath[k0]
poly, isref = _get_pts_from_path_svg(v0['poly'], res=res)
if isref is True:
ref = poly
del dpath[k0]
continue
dpath[k0]['poly'] = poly
# class and color
color = v0['color'][v0['color'].index(kstr) + len(kstr):].split(';')[0]
if color == 'none':
dpath[k0]['cls'] = 'Ves'
color = None
else:
dpath[k0]['cls'] = 'PFC'
dpath[k0]['color'] = color
# Check for negative r
lkneg = [k0 for k0, v0 in dpath.items() if np.any(v0['poly'][0, :] <= 0.)]
    if len(lkneg) > 0:
        lstr = ['\t- {}'.format(k0) for k0 in lkneg]
        msg = (
            "With the chosen r0 ({}) some structures have negative r values\n".format(r0)
+ "This is impossible in a toroidal coordinate system\n"
+ " => the following structures are removed:\n"
+ "\n".join(lstr)
)
if len(lkneg) == len(dpath):
raise Exception(msg)
else:
warnings.warn(msg)
dpath = {k0: dpath[k0] for k0 in dpath.keys() if k0 not in lkneg}
# Set origin and rescale
if ref is not None:
lc = [
point_ref1 is not None and point_ref2 is not None,
point_ref1 is not None and length_ref is not None,
]
if not any(lc):
msg = (
"Arg reference line for scaling has been detected!\n"
+ "But it cannot be used without providing:\n"
+ "\t- point_ref1 + point_ref2: iterables of len() = 2\n"
+ "\t- point_Ref1 + length_ref: iterable len() = 2 + scalar\n"
)
warnings.warn(msg)
else:
unit = np.diff(ref, axis=1)
unit = unit / np.linalg.norm(unit)
unit = np.array([[unit[0, 0]], [-unit[1, 0]]])
if not lc[0]:
point_ref2 = np.array(point_ref1)[:, None] + length_ref*unit
# if horizontal (resp. vertical line) => coef = inf
# => assume equal scale for r and z instead to avoid inf
eps = 1.e-8
if np.abs(unit[0, 0]) > eps:
r_coef = (
(point_ref2[0]-point_ref1[0]) / (ref[0, 1] - ref[0, 0])
)
if np.abs(unit[1, 0]) > eps:
z_coef = (
(point_ref2[1]-point_ref1[1]) / (ref[1, 1] - ref[1, 0])
)
if np.abs(unit[0, 0]) < eps:
# vertical line => assume rscale = zscale
r_coef = -z_coef
if np.abs(unit[1, 0]) < eps:
# horizontal line => assume rscale = zscale
z_coef = -r_coef
r_offset = point_ref1[0] - r_coef*ref[0, 0]
z_offset = point_ref1[1] - z_coef*ref[1, 0]
for k0 in dpath.keys():
dpath[k0]['poly'] = np.array([
r_coef*dpath[k0]['poly'][0, :] + r_offset,
z_coef*dpath[k0]['poly'][1, :] + z_offset,
])
else:
for k0 in dpath.keys():
dpath[k0]['poly'] = np.array([
scale*(dpath[k0]['poly'][0, :] - r0),
scale*(-dpath[k0]['poly'][1, :] - z0),
])
# verb
if verb is True:
lVes = sorted([k0 for k0, v0 in dpath.items() if v0['cls'] == 'Ves'])
lPFC = sorted([k0 for k0, v0 in dpath.items() if v0['cls'] == 'PFC'])
lobj = [
'\t- {}: {} ({} pts, {})'.format(
dpath[k0]['cls'], k0,
dpath[k0]['poly'].shape[1], dpath[k0]['color'],
)
for k0 in lVes + lPFC
]
        msg = (
            "The following structures were loaded:\n"
            + "\n".join(lobj)
            + "\nfrom {}".format(pfe)
        )
print(msg)
return dpath
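# Minimal usage sketch for get_paths_from_svg() (not part of the library API).
# The file name, reference point and reference length below are hypothetical,
# and only illustrate the two supported scaling modes: explicit r0/z0 + scale,
# or an open 2-point reference line in the svg combined with point_ref1 and
# either point_ref2 or length_ref.
def _example_get_paths_from_svg():
    # Assumes 'machine.svg' exists and contains closed paths (plus, optionally,
    # one open 2-point line used as the scaling reference)
    dpath = get_paths_from_svg(
        pfe='machine.svg',       # hypothetical file
        res=0.05,                # max distance between sampled points on curves
        point_ref1=(2.0, 0.0),   # hypothetical (R, Z) of one reference-line end
        length_ref=1.0,          # hypothetical physical length of that line
        verb=True,
    )
    # Each entry holds a (2, npts) polygon in (R, Z), a class ('Ves' or 'PFC')
    # and a color extracted from the svg style
    for k0, v0 in dpath.items():
        print(k0, v0['cls'], v0['poly'].shape)
    return dpath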
# ==============================================================================
# = Ves sub-functions
# ==============================================================================
def _Struct_set_Poly(
Poly, pos=None, extent=None, arrayorder="C", Type="Tor", Clock=False
):
""" Compute geometrical attributes of a Struct object """
# Make Poly closed, counter-clockwise, with '(cc,N)' layout and arrayorder
try:
Poly = _GG.format_poly(Poly, order="C", Clock=False, close=True,
Test=True)
except Exception as excp:
print(excp)
assert Poly.shape[0] == 2, "Arg Poly must be a 2D polygon !"
fPfmt = np.ascontiguousarray if arrayorder == "C" else np.asfortranarray
# Get all remarkable points and moments
NP = Poly.shape[1] - 1
P1Max = Poly[:, np.argmax(Poly[0, :])]
P1Min = Poly[:, np.argmin(Poly[0, :])]
P2Max = Poly[:, np.argmax(Poly[1, :])]
P2Min = Poly[:, np.argmin(Poly[1, :])]
BaryP = np.sum(Poly[:, :-1], axis=1, keepdims=False) / (Poly.shape[1] - 1)
BaryL = np.array(
[(P1Max[0] + P1Min[0]) / 2.0, (P2Max[1] + P2Min[1]) / 2.0]
)
BaryS, Surf = _GG.poly_area_and_barycenter(Poly, NP)
# Get lim-related indicators
noccur = int(pos.size)
Multi = noccur > 1
# Get Tor-related quantities
if Type.lower() == "lin":
Vol, BaryV = None, None
else:
Vol, BaryV = _GG.Poly_VolAngTor(Poly)
if Vol <= 0.0:
msg = ("Pb. with volume computation for Struct of type 'Tor' !\n"
+ "\t- Vol = {}\n".format(Vol)
+ "\t- Poly = {}\n\n".format(str(Poly))
+ " => Probably corrupted polygon\n"
+ " => Please check polygon is not self-intersecting")
raise Exception(msg)
# Compute the non-normalized vector of each side of the Poly
Vect = np.diff(Poly, n=1, axis=1)
Vect = fPfmt(Vect)
# Compute the normalised vectors directed inwards
Vin = np.array([Vect[1, :], -Vect[0, :]])
Vin = -Vin # Poly is Counter Clock-wise as defined above
Vin = Vin / np.hypot(Vin[0, :], Vin[1, :])[np.newaxis, :]
Vin = fPfmt(Vin)
poly = _GG.format_poly(
Poly,
order=arrayorder,
Clock=Clock,
close=False,
Test=True,
)
# Get bounding circle
circC = BaryS
r = np.sqrt(np.sum((poly - circC[:, np.newaxis]) ** 2, axis=0))
circr = np.max(r)
dout = {
"Poly": poly,
"pos": pos,
"extent": extent,
"noccur": noccur,
"Multi": Multi,
"nP": NP,
"P1Max": P1Max,
"P1Min": P1Min,
"P2Max": P2Max,
"P2Min": P2Min,
"BaryP": BaryP,
"BaryL": BaryL,
"BaryS": BaryS,
"BaryV": BaryV,
"Surf": Surf,
"VolAng": Vol,
"Vect": Vect,
"VIn": Vin,
"circ-C": circC,
"circ-r": circr,
"Clock": Clock,
}
return dout
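# Hedged sketch of _Struct_set_Poly() on a simple square cross-section.
# The input values are illustrative only, and the call relies on the compiled
# _GG helpers, so it only runs in a full tofu install.
def _example_struct_set_poly():
    # Square of side 1 m centred at R = 2 m, Z = 0 m, toroidal geometry, no lim
    poly = np.array([[1.5, 2.5, 2.5, 1.5],
                     [-0.5, -0.5, 0.5, 0.5]])
    dout = _Struct_set_Poly(poly, pos=np.array([]), extent=np.array([]),
                            Type="Tor", Clock=False)
    # dout holds the formatted polygon, remarkable points, barycenters,
    # surface and toroidal volume (here Surf ~ 1.0 m^2, BaryS ~ (2.0, 0.0))
    print(dout['Surf'], dout['BaryS'])
    return dout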
def _Ves_get_InsideConvexPoly(
Poly,
P2Min,
P2Max,
BaryS,
RelOff=_def.TorRelOff,
ZLim="Def",
Spline=True,
Splprms=_def.TorSplprms,
NP=_def.TorInsideNP,
Plot=False,
Test=True,
):
if Test:
assert type(RelOff) is float, "Arg RelOff must be a float"
assert (
ZLim is None or ZLim == "Def" or type(ZLim) in [tuple, list]
), "Arg ZLim must be a tuple (ZlimMin, ZLimMax)"
assert type(Spline) is bool, "Arg Spline must be a bool !"
if ZLim is not None:
if ZLim == "Def":
ZLim = (
P2Min[1] + 0.1 * (P2Max[1] - P2Min[1]),
P2Max[1] - 0.05 * (P2Max[1] - P2Min[1]),
)
indZLim = (Poly[1, :] < ZLim[0]) | (Poly[1, :] > ZLim[1])
if Poly.shape[1] - indZLim.sum() < 10:
msg = "Poly seems to be Convex and simple enough !"
msg += "\n Poly.shape[1] - indZLim.sum() < 10"
warnings.warn(msg)
return Poly
Poly = np.delete(Poly, indZLim.nonzero()[0], axis=1)
if np.all(Poly[:, 0] == Poly[:, -1]):
Poly = Poly[:, :-1]
Np = Poly.shape[1]
if Spline:
BarySbis = np.tile(BaryS, (Np, 1)).T
Ptemp = (1.0 - RelOff) * (Poly - BarySbis)
# Poly = BarySbis + Ptemp
Ang = np.arctan2(Ptemp[1, :], Ptemp[0, :])
Ang, ind = np.unique(Ang, return_index=True)
Ptemp = Ptemp[:, ind]
# spline parameters
ww = Splprms[0] * np.ones((Np + 1,))
ss = Splprms[1] * (Np + 1) # smoothness parameter
kk = Splprms[2] # spline order
nest = int(
(Np + 1) / 2.0
) # estimate of number of knots needed (-1 = maximal)
# Find the knot points
# TODO @DV : we can probably get rid of this
# tckp,uu = scpinterp.splprep([np.append(Ptemp[0,:],Ptemp[0,0]),
# np.append(Ptemp[1,:],Ptemp[1,0]),np.append(Ang,Ang[0]+2.*np.pi)],
# w=ww, s=ss, k=kk, nest=nest)
tckp, uu = scpinterp.splprep(
[
np.append(Ptemp[0, :], Ptemp[0, 0]),
np.append(Ptemp[1, :], Ptemp[1, 0]),
],
u=np.append(Ang, Ang[0] + 2.0 * np.pi),
w=ww,
s=ss,
k=kk,
nest=nest,
full_output=0,
)
xnew, ynew = scpinterp.splev(np.linspace(-np.pi, np.pi, NP), tckp)
Poly = np.array([xnew + BaryS[0], ynew + BaryS[1]])
Poly = np.concatenate((Poly, Poly[:, 0:1]), axis=1)
if Plot:
import matplotlib.pyplot as plt
f = plt.figure(facecolor="w", figsize=(8, 10))
ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
ax.plot(Poly[0, :], Poly[1, :], "-k", Poly[0, :], Poly[1, :], "-r")
ax.set_aspect(aspect="equal", adjustable="datalim"), ax.set_xlabel(
r"R (m)"
), ax.set_ylabel(r"Z (m)")
f.canvas.draw()
return Poly
# ==============================================================================
# = Ves sampling functions
# ==============================================================================
def _Ves_get_sample_checkinputs(
res=None,
domain=None,
resMode=None,
ind=None,
which='volume'
):
""" Check inputs for all sampling routines """
# res
dres = {'edge': 1, 'cross': 2, 'surface': 2, 'volume': 3}
if res is None:
res = _SAMPLE_RES[which]
ltypes = [int, float, np.int_, np.float_]
c0 = (type(res) in ltypes
or (hasattr(res, "__iter__")
and len(res) == dres[which]
and all([type(ds) in ltypes for ds in res])))
if not c0:
msg = ("Arg res must be either:\n"
+ "\t- float: unique resolution for all directions\n"
+ "\t- iterable of {} floats\n".format(dres[which])
+ " You provided:\n{}".format(res))
raise Exception(msg)
if type(res) in ltypes:
if which != 'edge':
res = [float(res) for ii in range(dres[which])]
else:
if which == 'edge':
msg = ("For edge, res cannot be an iterable!\n"
+ "\t- res: {}".format(res))
raise Exception(msg)
res = [float(res[ii]) for ii in range(dres[which])]
# domain (i.e.: sub-domain to be sampled, defined by its limits)
ddomain = {'edge': 2, 'cross': 2, 'surface': 3, 'volume': 3}
if domain is None:
domain = [None for ii in range(ddomain[which])]
c0 = (hasattr(domain, "__iter__")
and len(domain) == ddomain[which]
and all([dd is None
or (hasattr(dd, "__iter__")
and len(dd) == 2
and all([ss is None
or type(ss) in ltypes
for ss in dd]))
for dd in domain]))
if not c0:
msg = ("Arg domain must be a len()={} iterable".format(ddomain[which])
+ " where each element can be:\n"
+ "\t- an iterable 2 floats: [lower, upper] bounds\n"
+ "\t- None: no bounds\n"
+ " You provided:\n{}".format(domain))
raise Exception(msg)
for ii in range(len(domain)):
if domain[ii] is not None:
domain[ii] = [float(domain[ii][0])
if domain[ii][0] is not None else None,
float(domain[ii][1])
if domain[ii][1] is not None else None]
# resMode
if resMode is None:
resMode = _SAMPLE_RESMODE[which]
c0 = isinstance(resMode, str) and resMode.lower() in ["abs", "rel"]
if not c0:
msg = ("Arg resMode must be in ['abs','rel']!\n"
+ " You provided:\n{}".format(resMode))
raise Exception(msg)
resMode = resMode.lower()
# ind (indices of points to be recovered)
c0 = (ind is None
or (isinstance(ind, np.ndarray)
and ind.ndim == 1
and ind.dtype == np.int_
and np.all(ind >= 0))
or (which == 'surface'
and isinstance(ind, list)
and all([isinstance(indi, np.ndarray)
and indi.ndim == 1
and indi.dtype == np.int_
and np.all(indi >= 0) for indi in ind])))
if not c0:
msg = ("Arg ind must be either:\n"
+ "\t- None: domain is used instead\n"
+ "\t- 1d np.ndarray of positive int: indices\n")
if which == 'surface':
msg += "\t- list of 1d np.ndarray of positive indices\n"
msg += " You provided:\n{}".format(ind)
raise Exception(msg)
return res, domain, resMode, ind
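# Small illustration of the normalisation performed by
# _Ves_get_sample_checkinputs() (assumed call, outputs deduced from the checks
# above): a scalar resolution is broadcast to one value per direction, a None
# domain becomes "no bounds" in every direction, and resMode defaults to the
# module-level setting.
def _example_sample_checkinputs():
    res, domain, resMode, ind = _Ves_get_sample_checkinputs(
        res=0.05, domain=None, resMode=None, ind=None, which='volume',
    )
    # res -> [0.05, 0.05, 0.05], domain -> [None, None, None], resMode -> 'abs'
    return res, domain, resMode, ind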
def _Ves_get_sampleEdge(
VPoly,
res=None,
domain=None,
resMode=None,
offsetIn=0.0,
VIn=None,
margin=1.0e-9
):
# -------------
# Check inputs
# standard
res, domain, resMode, ind = _Ves_get_sample_checkinputs(
res=res,
domain=domain,
resMode=resMode,
ind=None,
which='edge',
)
# specific
ltypes = [int, float, np.int_, np.float_]
assert type(offsetIn) in ltypes
# -------------
# Compute
Pts, reseff, ind, N, Rref, VPolybis = _GG.discretize_vpoly(
VPoly,
float(res),
mode=resMode,
D1=domain[0],
D2=domain[1],
margin=margin,
DIn=float(offsetIn),
VIn=VIn,
)
return Pts, reseff, ind
def _Ves_get_sampleCross(
VPoly,
Min1,
Max1,
Min2,
Max2,
res=None,
domain=None,
resMode=None,
ind=None,
margin=1.0e-9,
mode="flat",
):
# -------------
# Check inputs
# standard
res, domain, resMode, ind = _Ves_get_sample_checkinputs(
res=res,
domain=domain,
resMode=resMode,
ind=ind,
which='cross',
)
# specific
assert mode in ["flat", "imshow"]
# -------------
# Compute
MinMax1 = np.array([Min1, Max1])
MinMax2 = np.array([Min2, Max2])
if ind is None:
if mode == "flat":
Pts, dS, ind, d1r, d2r = _GG.discretize_segment2d(
MinMax1,
MinMax2,
res[0],
res[1],
D1=domain[0],
D2=domain[1],
mode=resMode,
VPoly=VPoly,
margin=margin,
)
out = (Pts, dS, ind, (d1r, d2r))
else:
x1, d1r, ind1, N1 = _GG._Ves_mesh_dlfromL_cython(
MinMax1, res[0], domain[0], Lim=True,
dLMode=resMode, margin=margin
)
x2, d2r, ind2, N2 = _GG._Ves_mesh_dlfromL_cython(
MinMax2, res[1], domain[1], Lim=True,
dLMode=resMode, margin=margin
)
xx1, xx2 = np.meshgrid(x1, x2)
pts = np.squeeze([xx1, xx2])
extent = (
x1[0] - d1r / 2.0,
x1[-1] + d1r / 2.0,
x2[0] - d2r / 2.0,
x2[-1] + d2r / 2.0,
)
out = (pts, x1, x2, extent)
else:
assert mode == "flat"
c0 = type(ind) is np.ndarray and ind.ndim == 1
c0 = c0 and ind.dtype in ["int32", "int64"] and np.all(ind >= 0)
assert c0, "Arg ind must be a np.ndarray of int !"
Pts, dS, d1r, d2r = _GG._Ves_meshCross_FromInd(
MinMax1,
MinMax2,
res[0],
res[1],
ind,
dSMode=resMode,
margin=margin,
)
out = (Pts, dS, ind, (d1r, d2r))
return out
def _Ves_get_sampleS(
VPoly,
res=None,
domain=None,
resMode="abs",
ind=None,
offsetIn=0.0,
VIn=None,
VType="Tor",
VLim=None,
nVLim=None,
returnas="(X,Y,Z)",
margin=1.0e-9,
Multi=False,
Ind=None,
):
""" Sample the surface """
# -------------
# Check inputs
# standard
res, domain, resMode, ind = _Ves_get_sample_checkinputs(
res=res,
domain=domain,
resMode=resMode,
ind=ind,
which='surface',
)
# nVLim and VLim
if not (type(nVLim) in [int, np.int_] and nVLim >= 0):
        msg = ("Arg nVLim must be a positive int\n"
+ " You provided:\n{} ({})".format(nVLim, type(nVLim)))
raise Exception(msg)
VLim = None if (VLim is None or nVLim == 0) else np.array(VLim)
if not isinstance(Multi, bool):
msg = ("Arg Multi must be a bool!\n"
+ " You provided:\n{}".format(Multi))
raise Exception(msg)
# Check if Multi
if nVLim > 1:
assert VLim is not None, "For multiple Struct, Lim cannot be None !"
assert all([hasattr(ll, "__iter__") and len(ll) == 2 for ll in VLim])
if Ind is None:
Ind = np.arange(0, nVLim)
else:
Ind = [Ind] if not hasattr(Ind, "__iter__") else Ind
Ind = np.asarray(Ind).astype(int)
if ind is not None:
if isinstance(ind, np.ndarray):
ind = [ind for ii in range(len(Ind))]
elif not (isinstance(ind, list) and len(ind) == len(Ind)):
msg = ("Arg ind must be a list of same len() as Ind!\n"
+ "\t- provided: {}".format(ind))
raise Exception(msg)
else:
VLim = [None] if VLim is None else [VLim.ravel()]
if ind is not None:
if not isinstance(ind, np.ndarray):
msg = ("ind must be a np.ndarray if nVLim == 1\n"
+ "\t- provided: {}".format(ind))
raise Exception(msg)
Ind = [0]
if ind is None:
pts, dS, ind, reseff = (
[0 for ii in Ind],
[0 for ii in Ind],
[0 for ii in Ind],
[[0, 0] for ii in Ind],
)
if VType.lower() == "tor":
for ii in range(0, len(Ind)):
if VLim[Ind[ii]] is None:
(pts[ii],
dS[ii],
ind[ii],
NL,
reseff[ii][0],
Rref,
reseff[ii][1],
nRPhi0,
VPbis) = _GG._Ves_Smesh_Tor_SubFromD_cython(
res[0],
res[1],
VPoly,
DR=domain[0],
DZ=domain[1],
DPhi=domain[2],
DIn=offsetIn,
VIn=VIn,
PhiMinMax=None,
Out=returnas,
margin=margin,
)
else:
(pts[ii],
dS[ii],
ind[ii],
NL,
reseff[ii][0],
Rref,
dR0r,
dZ0r,
reseff[ii][1],
VPbis) = _GG._Ves_Smesh_TorStruct_SubFromD_cython(
VLim[Ind[ii]],
res[0],
res[1],
VPoly,
DR=domain[0],
DZ=domain[1],
DPhi=domain[2],
DIn=offsetIn,
VIn=VIn,
Out=returnas,
margin=margin,
)
reseff[ii] += [dR0r, dZ0r]
else:
for ii in range(0, len(Ind)):
(pts[ii],
dS[ii],
ind[ii],
NL,
reseff[ii][0],
Rref,
reseff[ii][1],
dY0r,
dZ0r,
VPbis) = _GG._Ves_Smesh_Lin_SubFromD_cython(
VLim[Ind[ii]],
res[0],
res[1],
VPoly,
DX=domain[0],
DY=domain[1],
DZ=domain[2],
DIn=offsetIn,
VIn=VIn,
margin=margin,
)
reseff[ii] += [dY0r, dZ0r]
else:
ind = ind if Multi else [ind]
pts, dS, reseff = (
[np.ones((3, 0)) for ii in Ind],
[0 for ii in Ind],
[[0, 0] for ii in Ind],
)
if VType.lower() == "tor":
for ii in range(0, len(Ind)):
if ind[Ind[ii]].size > 0:
if VLim[Ind[ii]] is None:
out_loc = _GG._Ves_Smesh_Tor_SubFromInd_cython(
res[0],
res[1],
VPoly,
ind[Ind[ii]],
DIn=offsetIn,
VIn=VIn,
PhiMinMax=None,
Out=returnas,
margin=margin,
)
pts[ii], dS[ii], NL, reseff[ii][0], Rref = out_loc[:5]
reseff[ii][1], nRPhi0, VPbis = out_loc[5:]
else:
out_loc = _GG._Ves_Smesh_TorStruct_SubFromInd_cython(
VLim[Ind[ii]],
res[0],
res[1],
VPoly,
ind[Ind[ii]],
DIn=offsetIn,
VIn=VIn,
Out=returnas,
margin=margin,
)
pts[ii], dS[ii], NL, reseff[ii][0], Rref = out_loc[:5]
dR0r, dZ0r, reseff[ii][1], VPbis = out_loc[5:]
reseff[ii] += [dR0r, dZ0r]
else:
for ii in range(0, len(Ind)):
if ind[Ind[ii]].size > 0:
out_loc = _GG._Ves_Smesh_Lin_SubFromInd_cython(
VLim[Ind[ii]],
res[0],
res[1],
VPoly,
ind[Ind[ii]],
DIn=offsetIn,
VIn=VIn,
margin=margin,
)
pts[ii], dS[ii], NL, reseff[ii][0], Rref = out_loc[:5]
reseff[ii][1], dY0r, dZ0r, VPbis = out_loc[5:]
reseff[ii] += [dY0r, dZ0r]
if len(VLim) == 1:
pts, dS, ind, reseff = pts[0], dS[0], ind[0], reseff[0]
return pts, dS, ind, reseff
def _Ves_get_sampleV(
VPoly,
Min1,
Max1,
Min2,
Max2,
res=None,
domain=None,
resMode=None,
ind=None,
VType="Tor",
VLim=None,
returnas="(X,Y,Z)",
margin=1.0e-9,
algo="new",
num_threads=48,
):
""" Sample the volume """
# -------------
# Check inputs
res, domain, resMode, ind = _Ves_get_sample_checkinputs(
res=res,
domain=domain,
resMode=resMode,
ind=ind,
which='volume',
)
# ------------
# Computation
MinMax1 = np.array([Min1, Max1])
MinMax2 = np.array([Min2, Max2])
VLim = None if VType.lower() == "tor" else np.array(VLim).ravel()
reseff = [None, None, None]
if ind is None:
if VType.lower() == "tor":
if algo.lower() == "new":
(pts, dV, ind,
reseff[0],
reseff[1],
reseff[2],
sz_r, sz_z,
) = _GG._Ves_Vmesh_Tor_SubFromD_cython(
res[0],
res[1],
res[2],
MinMax1,
MinMax2,
DR=domain[0],
DZ=domain[1],
DPhi=domain[2],
limit_vpoly=VPoly,
out_format=returnas,
margin=margin,
num_threads=num_threads,
)
else:
(pts, dV, ind,
reseff[0],
reseff[1],
reseff[2]) = _GG._Ves_Vmesh_Tor_SubFromD_cython_old(
res[0],
res[1],
res[2],
MinMax1,
MinMax2,
DR=domain[0],
DZ=domain[1],
DPhi=domain[2],
VPoly=VPoly,
Out=returnas,
margin=margin,
)
else:
(pts, dV, ind,
reseff[0],
reseff[1],
reseff[2]) = _GG._Ves_Vmesh_Lin_SubFromD_cython(
res[0],
res[1],
res[2],
VLim,
MinMax1,
MinMax2,
DX=domain[0],
DY=domain[1],
DZ=domain[2],
limit_vpoly=VPoly,
margin=margin,
)
else:
if VType.lower() == "tor":
if algo.lower() == "new":
(pts, dV,
reseff[0],
reseff[1],
reseff[2]) = _GG._Ves_Vmesh_Tor_SubFromInd_cython(
res[0],
res[1],
res[2],
MinMax1,
MinMax2,
ind,
Out=returnas,
margin=margin,
num_threads=num_threads,
)
else:
(pts, dV,
reseff[0],
reseff[1],
reseff[2]) = _GG._Ves_Vmesh_Tor_SubFromInd_cython_old(
res[0],
res[1],
res[2],
MinMax1,
MinMax2,
ind,
Out=returnas,
margin=margin,
)
else:
(pts, dV,
reseff[0],
reseff[1],
reseff[2]) = _GG._Ves_Vmesh_Lin_SubFromInd_cython(
res[0],
res[1],
res[2],
VLim,
MinMax1,
MinMax2,
ind,
margin=margin
)
return pts, dV, ind, reseff
# ==============================================================================
# = phi / theta projections for magfieldlines
# ==============================================================================
def _Struct_get_phithetaproj(ax=None, poly_closed=None, lim=None, noccur=0):
# phi = toroidal angle
if noccur == 0:
Dphi = np.array([[-np.pi, np.pi]])
nphi = np.r_[1]
else:
assert lim.ndim == 2, str(lim)
nphi = np.ones((noccur,), dtype=int)
ind = (lim[:, 0] > lim[:, 1]).nonzero()[0]
Dphi = np.concatenate((lim, np.full((noccur, 2), np.nan)), axis=1)
if ind.size > 0:
for ii in ind:
Dphi[ii, :] = [lim[ii, 0], np.pi, -np.pi, lim[ii, 1]]
nphi[ii] = 2
# theta = poloidal angle
Dtheta = np.arctan2(poly_closed[1, :] - ax[1], poly_closed[0, :] - ax[0])
Dtheta = np.r_[np.min(Dtheta), np.max(Dtheta)]
if Dtheta[0] > Dtheta[1]:
ntheta = 2
Dtheta = [Dtheta[0], np.pi, -np.pi, Dtheta[1]]
else:
ntheta = 1
return nphi, Dphi, ntheta, Dtheta
def _get_phithetaproj_dist(
poly_closed,
ax,
Dtheta,
nDtheta,
Dphi,
nDphi,
theta,
phi,
ntheta,
nphi,
noccur,
):
if nDtheta == 1:
ind = (theta >= Dtheta[0]) & (theta <= Dtheta[1])
else:
ind = (theta >= Dtheta[0]) | (theta <= Dtheta[1])
disttheta = np.full((theta.size,), np.nan)
# phi within Dphi
if noccur > 0:
indphi = np.zeros((nphi,), dtype=bool)
for ii in range(0, noccur):
for jj in range(0, nDphi[ii]):
indphi |= (phi >= Dphi[ii, jj]) & (phi <= Dphi[ii, jj + 1])
if not np.any(indphi):
return disttheta, indphi
else:
indphi = np.ones((nphi,), dtype=bool)
# No theta within Dtheta
if not np.any(ind):
return disttheta, indphi
# Check for non-parallel AB / u pairs
u = np.array([np.cos(theta), np.sin(theta)])
AB = np.diff(poly_closed, axis=1)
detABu = AB[0, :, None] * u[1, None, :] - AB[1, :, None] * u[0, None, :]
inddet = ind[None, :] & (np.abs(detABu) > 1.0e-9)
if not np.any(inddet):
return disttheta, indphi
nseg = poly_closed.shape[1] - 1
k = np.full((nseg, ntheta), np.nan)
OA = poly_closed[:, :-1] - ax[:, None]
detOAu = (OA[0, :, None] * u[1, None, :] - OA[1, :, None] * u[0, None, :])[
inddet
]
ss = -detOAu / detABu[inddet]
inds = (ss >= 0.0) & (ss < 1.0)
inddet[inddet] = inds
if not np.any(inds):
return disttheta, indphi
scaOAu = (OA[0, :, None] * u[0, None, :] + OA[1, :, None] * u[1, None, :])[
inddet
]
scaABu = (AB[0, :, None] * u[0, None, :] + AB[1, :, None] * u[1, None, :])[
inddet
]
k[inddet] = scaOAu + ss[inds] * scaABu
indk = k[inddet] > 0.0
inddet[inddet] = indk
if not np.any(indk):
return disttheta, indphi
k[~inddet] = np.nan
indok = np.any(inddet, axis=0)
disttheta[indok] = np.nanmin(k[:, indok], axis=0)
return disttheta, indphi
# ==============================================================================
# = LOS functions
# ==============================================================================
def LOS_PRMin(Ds, us, kOut=None, Eps=1.0e-12, squeeze=True, Test=True):
""" Compute the point on the LOS where the major radius is minimum """
if Test:
assert Ds.ndim in [1, 2, 3] and 3 in Ds.shape and Ds.shape == us.shape
if kOut is not None:
kOut = np.atleast_1d(kOut)
assert kOut.size == Ds.size / 3
if Ds.ndim == 1:
Ds, us = Ds[:, None, None], us[:, None, None]
elif Ds.ndim == 2:
Ds, us = Ds[:, :, None], us[:, :, None]
if kOut is not None:
if kOut.ndim == 1:
kOut = kOut[:, None]
_, nlos, nref = Ds.shape
kRMin = np.full((nlos, nref), np.nan)
uparN = np.sqrt(us[0, :, :] ** 2 + us[1, :, :] ** 2)
# Case with u vertical
ind = uparN > Eps
kRMin[~ind] = 0.0
# Else
kRMin[ind] = (
-(us[0, ind] * Ds[0, ind] + us[1, ind] * Ds[1, ind]) / uparN[ind] ** 2
)
# Check
kRMin[kRMin <= 0.0] = 0.0
if kOut is not None:
kRMin[kRMin > kOut] = kOut[kRMin > kOut]
# squeeze
if squeeze:
        if nref == 1 and nlos == 1:
kRMin = kRMin[0, 0]
elif nref == 1:
kRMin = kRMin[:, 0]
elif nlos == 1:
kRMin = kRMin[0, :]
return kRMin
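# Quick sanity sketch for LOS_PRMin(): along the ray P(k) = D + k*u the squared
# major radius is R(k)^2 = (Dx + k*ux)^2 + (Dy + k*uy)^2, which is minimal at
# k = -(ux*Dx + uy*Dy) / (ux^2 + uy^2), exactly the expression used above.
# The numbers below are illustrative only.
def _example_los_prmin():
    D = np.array([3., 0., 0.])
    u = np.array([-1., 1., 0.]) / np.sqrt(2.)
    k = LOS_PRMin(D, u, squeeze=True)
    # Expected k ~ 3/sqrt(2) ~ 2.12, i.e. the perpendicular distance from the
    # (vertical) torus axis to this horizontal chord
    return k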
def LOS_CrossProj(
VType,
Ds,
us,
kOuts,
proj="All",
multi=False,
num_threads=16,
return_pts=False,
Test=True,
):
""" Compute the parameters to plot the poloidal projection of the LOS """
assert type(VType) is str and VType.lower() in ["tor", "lin"]
    dproj = {
        "cross": ("R", "Z"),
        "hor": ("x", "y"),
        "all": ("R", "Z", "x", "y"),
        "3d": ("x", "y", "z"),
    }
assert type(proj) in [str, tuple]
if type(proj) is tuple:
assert all([type(pp) is str for pp in proj])
lcoords = proj
else:
proj = proj.lower()
assert proj in dproj.keys()
lcoords = dproj[proj]
if return_pts:
assert proj in ["cross", "hor", "3d"]
lc = [Ds.ndim == 3, Ds.shape == us.shape]
if not all(lc):
msg = "Ds and us must have the same shape and dim in [2,3]:\n"
msg += " - provided Ds.shape: %s\n" % str(Ds.shape)
msg += " - provided us.shape: %s" % str(us.shape)
raise Exception(msg)
lc = [kOuts.size == Ds.size / 3, kOuts.shape == Ds.shape[1:]]
if not all(lc):
msg = "kOuts must have the same shape and ndim = Ds.ndim-1:\n"
msg += " - Ds.shape : %s\n" % str(Ds.shape)
msg += " - kOutss.shape: %s" % str(kOuts.shape)
raise Exception(msg)
# Prepare inputs
_, nlos, nseg = Ds.shape
# Detailed sampling for 'tor' and ('cross' or 'all')
R, Z = None, None
if "R" in lcoords or "Z" in lcoords:
angcross = np.arccos(
np.sqrt(us[0, ...] ** 2 + us[1, ...] ** 2)
/ np.sqrt(np.sum(us ** 2, axis=0))
)
resnk = np.ceil(25.0 * (1 - (angcross / (np.pi / 4) - 1) ** 2) + 5)
resnk = 1.0 / resnk.ravel()
# Use optimized get sample
DL = np.vstack((np.zeros((nlos * nseg,), dtype=float), kOuts.ravel()))
k, reseff, lind = _GG.LOS_get_sample(
nlos * nseg,
resnk,
DL,
dmethod="rel",
method="simps",
num_threads=num_threads,
Test=Test,
)
assert lind.size == nseg * nlos - 1
ind = lind[nseg - 1 :: nseg] # noqa
nbrep = np.r_[lind[0], np.diff(lind), k.size - lind[-1]]
pts = np.repeat(Ds.reshape((3, nlos * nseg)), nbrep, axis=1) + k[
None, :
] * np.repeat(us.reshape((3, nlos * nseg)), nbrep, axis=1)
if return_pts:
pts = np.array([np.hypot(pts[0, :], pts[1, :]), pts[2, :]])
if multi:
pts = np.split(pts, ind, axis=1)
else:
pts = np.insert(pts, ind, np.nan, axis=1)
else:
if multi:
if "R" in lcoords:
R = np.split(np.hypot(pts[0, :], pts[1, :]), ind)
if "Z" in lcoords:
Z = np.split(pts[2, :], ind)
else:
if "R" in lcoords:
R = np.insert(np.hypot(pts[0, :], pts[1, :]), ind, np.nan)
if "Z" in lcoords:
Z = np.insert(pts[2, :], ind, np.nan)
# Normal sampling => pts
# unnecessary only if 'tor' and 'cross'
x, y, z = None, None, None
if "x" in lcoords or "y" in lcoords or "z" in lcoords:
pts = np.concatenate(
(Ds, Ds[:, :, -1:] + kOuts[None, :, -1:] * us[:, :, -1:]), axis=-1
)
if multi:
ind = np.arange(1, nlos) * (nseg + 1)
pts = pts.reshape((3, nlos * (nseg + 1)))
else:
nancoords = np.full((3, nlos, 1), np.nan)
pts = np.concatenate((pts, nancoords), axis=-1)
pts = pts.reshape((3, nlos * (nseg + 2)))
if return_pts:
assert proj in ["hor", "3d"]
if multi:
if proj == "hor":
pts = np.split(pts[:2, :], ind, axis=1)
else:
pts = np.split(pts, ind, axis=1)
elif proj == "hor":
pts = pts[:2, :]
else:
if multi:
if "x" in lcoords:
x = np.split(pts[0, :], ind)
if "y" in lcoords:
y = np.split(pts[1, :], ind)
if "z" in lcoords:
z = np.split(pts[2, :], ind)
else:
if "x" in lcoords:
x = pts[0, :]
if "y" in lcoords:
y = pts[1, :]
if "z" in lcoords:
z = pts[2, :]
if return_pts:
return pts
else:
return R, Z, x, y, z
# ==============================================================================
# = Meshing & signal
# ==============================================================================
def LOS_get_sample(D, u, dL, DL=None, dLMode="abs", method="sum", Test=True):
""" Return the sampled line, with the specified method
'linspace': return the N+1 edges, including the first and last point
    'sum' : return the N segment midpoints
    'simps': return the N+1 edges, where N has to be even
(scipy.simpson requires an even number of intervals)
'romb' : return the N+1 edges, where N+1 = 2**k+1
(fed to scipy.romb for integration)
"""
if Test:
assert all(
[type(dd) is np.ndarray and dd.shape == (3,) for dd in [D, u]]
)
assert not hasattr(dL, "__iter__")
assert DL is None or all(
[
hasattr(DL, "__iter__"),
len(DL) == 2,
all([not hasattr(dd, "__iter__") for dd in DL]),
]
)
assert dLMode in ["abs", "rel"]
assert type(method) is str and method in [
"linspace",
"sum",
"simps",
"romb",
]
# Compute the min number of intervals to satisfy the specified resolution
N = (
int(np.ceil((DL[1] - DL[0]) / dL))
if dLMode == "abs"
else int(np.ceil(1.0 / dL))
)
# Modify N according to the desired method
if method == "simps":
N = N if N % 2 == 0 else N + 1
elif method == "romb":
N = 2 ** int(np.ceil(np.log(N) / np.log(2.0)))
# Derive k and dLr
if method == "sum":
dLr = (DL[1] - DL[0]) / N
k = DL[0] + (0.5 + np.arange(0, N)) * dLr
else:
k, dLr = np.linspace(
DL[0], DL[1], N + 1, endpoint=True, retstep=True, dtype=float
)
Pts = D[:, np.newaxis] + k[np.newaxis, :] * u[:, np.newaxis]
return Pts, k, dLr
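# Short, self-contained illustration of the sampling modes documented above
# (the chord and resolution are arbitrary): 'sum' returns segment midpoints
# while 'simps'/'romb' return the segment edges, with N adjusted to what the
# integrator needs.
def _example_los_get_sample():
    D = np.array([3., 0., 0.])
    u = np.array([-1., 0., 0.])
    pts_sum, k_sum, dlr_sum = LOS_get_sample(D, u, 0.1, DL=[0., 1.],
                                             dLMode="abs", method="sum")
    pts_simps, k_simps, dlr_simps = LOS_get_sample(D, u, 0.1, DL=[0., 1.],
                                                   dLMode="abs", method="simps")
    # 'sum' -> 10 midpoints, 'simps' -> 11 edges (N forced to be even)
    return k_sum, k_simps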
def LOS_calc_signal(
ff, D, u, dL, DL=None, dLMode="abs", method="romb", Test=True
):
    assert hasattr(ff, "__call__"), (
        "Arg ff must be a callable (function) taking at least 1 positional "
        "argument: Pts (a (3,N) np.ndarray of cartesian (X,Y,Z) coordinates) !"
    )
assert not method == "linspace"
Pts, k, dLr = LOS_get_sample(
D, u, dL, DL=DL, dLMode=dLMode, method=method, Test=Test
)
out = insp(ff)
N = np.sum(
[
(
pp.kind == pp.POSITIONAL_OR_KEYWORD
and pp.default is pp.empty
)
for pp in out.parameters.values()
]
)
if N == 1:
Vals = ff(Pts)
elif N == 2:
Vals = ff(Pts, np.tile(-u, (Pts.shape[1], 1)).T)
else:
raise ValueError(
"The function (ff) assessing the emissivity locally "
+ "must take a single positional argument: Pts a (3,N)"
+ " np.ndarray of (X,Y,Z) cartesian coordinates !"
)
Vals[np.isnan(Vals)] = 0.0
if method == "sum":
Int = np.sum(Vals) * dLr
elif method == "simps":
Int = scpintg.simps(Vals, x=None, dx=dLr)
elif method == "romb":
Int = scpintg.romb(Vals, dx=dLr, show=False)
return Int
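# Hedged end-to-end sketch: integrate a simple analytical emissivity along a
# chord with LOS_calc_signal(). The emissivity profile and chord are purely
# illustrative.
def _example_los_calc_signal():
    def emiss(pts):
        # Toy emissivity peaked on the R = 2 m circle (arbitrary units)
        R = np.hypot(pts[0, :], pts[1, :])
        return np.exp(-(R - 2.)**2 / 0.1)
    D = np.array([3., 0., 0.])
    u = np.array([-1., 0., 0.])
    # Integrate between k = 0 and k = 2 m along the chord, 1 cm resolution
    sig = LOS_calc_signal(emiss, D, u, 0.01, DL=[0., 2.], method="romb")
    return sig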
| mit |
Erotemic/plottool | plottool_ibeis/fig_presenter.py | 1 | 8747 | from __future__ import absolute_import, division, print_function
import sys
import time
import utool as ut
import matplotlib as mpl
from plottool_ibeis import custom_figure
#from .custom_constants import golden_wh
SLEEP_TIME = .01
__QT4_WINDOW_LIST__ = []
ut.noinject(__name__, '[fig_presenter]')
VERBOSE = ut.get_argflag(('--verbose-fig', '--verbfig', '--verb-pt'))
#(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[fig_presenter]', DEBUG=True)
def unregister_qt4_win(win):
global __QT4_WINDOW_LIST__
if win == 'all':
__QT4_WINDOW_LIST__ = []
else:
try:
#index = __QT4_WINDOW_LIST__.index(win)
__QT4_WINDOW_LIST__.remove(win)
except ValueError:
pass
def register_qt4_win(win):
global __QT4_WINDOW_LIST__
__QT4_WINDOW_LIST__.append(win)
# ---- GENERAL FIGURE COMMANDS ----
def set_geometry(fnum, x, y, w, h):
fig = custom_figure.ensure_fig(fnum)
qtwin = get_figure_window(fig)
qtwin.setGeometry(x, y, w, h)
def get_geometry(fnum):
fig = custom_figure.ensure_fig(fnum)
qtwin = get_figure_window(fig)
(x1, y1, x2, y2) = qtwin.geometry().getCoords()
(x, y, w, h) = (x1, y1, x2 - x1, y2 - y1)
return (x, y, w, h)
#def get_screen_info():
# # TODO Move dependency to guitool_ibeis
# desktop = QtWidgets.QDesktopWidget()
# mask = desktop.mask() # NOQA
# layout_direction = desktop.layoutDirection() # NOQA
# screen_number = desktop.screenNumber() # NOQA
# normal_geometry = desktop.normalGeometry() # NOQA
# num_screens = desktop.screenCount() # NOQA
# avail_rect = desktop.availableGeometry() # NOQA
# screen_rect = desktop.screenGeometry() # NOQA
# QtWidgets.QDesktopWidget().availableGeometry().center() # NOQA
# normal_geometry = desktop.normalGeometry() # NOQA
#@profile
def get_all_figures():
manager_list = mpl._pylab_helpers.Gcf.get_all_fig_managers()
all_figures = []
# Make sure you dont show figures that this module closed
for manager in manager_list:
try:
fig = manager.canvas.figure
except AttributeError:
continue
if not fig.__dict__.get('df2_closed', False):
all_figures.append(fig)
# Return all the figures sorted by their number
all_figures = sorted(all_figures, key=lambda fig: fig.number)
return all_figures
def get_all_qt4_wins():
return __QT4_WINDOW_LIST__
def all_figures_show():
if VERBOSE:
print('all_figures_show')
if not ut.get_argflag('--noshow'):
for fig in get_all_figures():
time.sleep(SLEEP_TIME)
show_figure(fig)
#fig.show()
#fig.canvas.draw()
def show_figure(fig):
try:
fig.show()
fig.canvas.draw()
except AttributeError as ex:
if not hasattr(fig, '_no_raise_plottool_ibeis'):
ut.printex(ex, '[pt] probably registered made figure with Qt.', iswarning=True)
def all_figures_tight_layout():
if '--noshow' not in sys.argv:
for fig in iter(get_all_figures()):
fig.tight_layout()
time.sleep(SLEEP_TIME)
def get_main_win_base():
if hasattr(mpl.backends, 'backend_qt4'):
backend = mpl.backends.backend_qt4
else:
backend = mpl.backends.backend_qt5
try:
QMainWin = backend.MainWindow
except Exception as ex:
try:
ut.printex(ex, 'warning', '[fig_presenter]')
#from guitool_ibeis.__PYQT__ import QtGui
QMainWin = backend.QtWidgets.QMainWindow
except Exception as ex1:
ut.printex(ex1, 'warning', '[fig_presenter]')
QMainWin = object
return QMainWin
def get_all_windows():
""" Returns all mpl figures and registered qt windows """
try:
all_figures = get_all_figures()
all_qt4wins = get_all_qt4_wins()
all_wins = all_qt4wins + [get_figure_window(fig) for fig in all_figures]
return all_wins
except AttributeError as ex:
ut.printex(ex, 'probably using a windowless backend',
iswarning=True)
return []
#@profile
def all_figures_tile(max_rows=None, row_first=True, no_tile=False,
monitor_num=None, percent_w=None, percent_h=None,
hide_toolbar=True):
"""
    Lays out all figures in a grid; monitor, size and layout are controlled by the keyword arguments
"""
#print('[plottool_ibeis] all_figures_tile()')
if no_tile:
return
current_backend = mpl.get_backend()
if not current_backend.startswith('Qt'):
#print('current_backend=%r is not a Qt backend. cannot tile.' % current_backend)
return
all_wins = get_all_windows()
num_wins = len(all_wins)
if num_wins == 0:
return
from plottool_ibeis import screeninfo
valid_positions = screeninfo.get_valid_fig_positions(num_wins, max_rows,
row_first, monitor_num,
percent_w=percent_w,
percent_h=percent_h)
QMainWin = get_main_win_base()
for ix, win in enumerate(all_wins):
isqt4_mpl = isinstance(win, QMainWin)
from guitool_ibeis.__PYQT__ import QtGui # NOQA
from guitool_ibeis.__PYQT__ import QtWidgets # NOQA
isqt4_back = isinstance(win, QtWidgets.QMainWindow)
isqt4_widget = isinstance(win, QtWidgets.QWidget)
(x, y, w, h) = valid_positions[ix]
#printDBG('tile %d-th win: xywh=%r' % (ix, (x, y, w, h)))
if not isqt4_mpl and not isqt4_back and not isqt4_widget:
raise NotImplementedError('%r-th Backend %r is not a Qt Window' %
(ix, win))
try:
if hide_toolbar:
toolbar = win.findChild(QtWidgets.QToolBar)
toolbar.setVisible(False)
win.setGeometry(x, y, w, h)
except Exception as ex:
ut.printex(ex)
def all_figures_bring_to_front():
try:
all_figures = get_all_figures()
for fig in iter(all_figures):
bring_to_front(fig)
except Exception as ex:
if not hasattr(fig, '_no_raise_plottool_ibeis'):
ut.printex(ex, iswarning=True)
def close_all_figures():
print('[pt] close_all_figures')
all_figures = get_all_figures()
for fig in iter(all_figures):
close_figure(fig)
def close_figure(fig):
print('[pt] close_figure')
fig.clf()
fig.df2_closed = True
qtwin = get_figure_window(fig)
qtwin.close()
def get_figure_window(fig):
try:
qwin = fig.canvas.manager.window
except AttributeError:
qwin = fig.canvas.window()
return qwin
def bring_to_front(fig):
if VERBOSE:
print('[pt] bring_to_front')
#what is difference between show and show normal?
qtwin = get_figure_window(fig)
qtwin.raise_()
#if not ut.WIN32:
# NOT sure on the correct order of these
# can cause the figure geometry to be unset
from guitool_ibeis.__PYQT__.QtCore import Qt
qtwin.activateWindow()
qtwin.setWindowFlags(Qt.WindowStaysOnTopHint)
qtwin.setWindowFlags(Qt.WindowFlags(0))
qtwin.show()
def show():
if VERBOSE:
print('[pt] show')
all_figures_show()
all_figures_bring_to_front()
#plt.show()
def reset():
if VERBOSE:
print('[pt] reset')
close_all_figures()
def draw():
if VERBOSE:
print('[pt] draw')
all_figures_show()
def update():
if VERBOSE:
print('[pt] update')
draw()
all_figures_bring_to_front()
def iupdate():
if VERBOSE:
print('[pt] iupdate')
if ut.inIPython():
update()
iup = iupdate
def present(*args, **kwargs):
"""
    Basically calls show if not embedded.
Kwargs:
max_rows, row_first, no_tile, monitor_num, percent_w, percent_h,
hide_toolbar
CommandLine:
python -m plottool_ibeis.fig_presenter present
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.fig_presenter import * # NOQA
>>> result = present()
>>> print(result)
>>> import plottool_ibeis as pt
>>> pt.show_if_requested()
"""
if VERBOSE:
print('[pt] present')
if not ut.get_argflag('--noshow'):
#print('[fig_presenter] Presenting figures...')
#with warnings.catch_warnings():
# warnings.simplefilter("ignore")
all_figures_tile(*args, **kwargs)
# Both of these lines cause the weird non-refresh black border behavior
all_figures_show()
all_figures_bring_to_front()
| apache-2.0 |
PYPIT/PYPIT | pypeit/fluxspec.py | 1 | 20140 | # Module for guiding Slit/Order tracing
from __future__ import absolute_import, division, print_function
import inspect
import numpy as np
import linetools
import os
import json
import matplotlib.pyplot as plt
#from importlib import reload
from astropy import units
from astropy.io import fits
from pypeit import msgs
from pypeit.core import flux
from pypeit.core import load
from pypeit.core import save
from pypeit import utils
from pypeit import masterframe
from pypeit import specobjs
from pypeit.spectrographs.util import load_spectrograph
from pypeit.par.pypeitpar import TelescopePar
from pypeit import debugger
class FluxSpec(object):
"""Class to guide fluxing
Parameters
----------
spectrograph : Spectrograph or str
Name of the spectrograph, e.g. shane_kast_blue
Used only to set settings for calls to the Class outside of PypeIt
        This includes extinction data.
par: FluxCalib parset
sens_file : str, optional
Filename of a sensitivity function file to be input
Attributes
----------
sensfunc : dict
Sensitivity function
steps : list
List of steps performed
frametype : str
Set to 'sensfunc'
std : SpecObj
The chosen one for generating the sensitivity function
std_header : dict-like
Used for the RA, DEC, AIRMASS, EXPTIME of the standard star spectrum
std_idx : int or list
Index that std is within the std_specbojs list
sci_specobjs : SpecObjs
List of SpecObj objects to be fluxed (or that were fluxed)
sci_header : dict-like
Used for the airmass, exptime of the science spectra
spectrograph : Spectrograph
Used for extinction correction
"""
# Frametype is a class attribute
frametype = 'sensfunc'
# JFH TODO In my opinion the things that the class operates on should be arguments and the parameters
# should be in the parset. I really don't like to see parsets used in this way where everything is passed in
# under the hood. It makes it challenging to understand what the code is doing.
def __init__(self, spectrograph, par, sens_file=None, debug=False):
# Init
self.spectrograph = spectrograph
self.par = par
# Get the extinction data
self.extinction_data = flux.load_extinction_data(
self.spectrograph.telescope['longitude'], self.spectrograph.telescope['latitude'])
# Parameters
if sens_file is None:
self.sens_file = par['sensfunc']
else:
self.sens_file = sens_file
self.multi_det = par['multi_det']
# Set telluric option
self.telluric = par['telluric']
# Main outputs
self.sens_dict = None if self.sens_file is None \
else self.load_sens_dict(self.sens_file)
# Attributes
self.steps = []
# Key Internals
self.std = None # Standard star spectrum (SpecObj object)
self.std_idx = None # Nested indices for the std_specobjs list that corresponds
# to the star!
# standard/telluric star information
self.star_type = par['star_type']
self.star_mag = par['star_mag']
# telluric mask keywords
self.BALM_MASK_WID = par['balm_mask_wid']
# sensfunc fitting parameters
self.poly_norder = par['poly_norder']
self.polycorrect = par['polycorrect']
self.debug = debug
# TODO This needs to be modified to return whatever it loaded. Pypeline independent.
def load_objs(self, spec1d_file, std=True):
"""
        Load specobjs and header from an input spec1d_file
Args:
spec1d_file: str
std: bool, optional
If True, load up standard star file and header
If False, load up science file and header
Returns:
Loads up self.std_specobjs or self.sci_specobjs
"""
specobjs, header = load.load_specobjs(spec1d_file)
if std:
self.std_specobjs, self.std_header = specobjs, header
msgs.info('Loaded {0} spectra from the spec1d standard star file: {1}'.format(
len(self.std_specobjs), spec1d_file))
self.std_ra = self.std_header['RA']
self.std_dec = self.std_header['DEC']
self.std_file = self.std_header['FILENAME']
else:
self.sci_specobjs, self.sci_header = specobjs, header
msgs.info('Loaded {0} spectra from the spec1d science file: {1}'.format(
len(self.sci_specobjs), spec1d_file))
# Check instrument
spectro = header['INSTRUME']
assert spectro == self.spectrograph.spectrograph
return specobjs, header
def find_standard(self):
"""
Identify the standard star from the list of all spectra in the specobjs
Wrapper to flux.find_standard which simply takes the brightest
Returns
-------
self.std : SpecObj
Corresponds to the chosen spectrum
"""
if self.par['std_obj_id'] is not None:
_ = self._set_std_obj()
return
if self.multi_det is not None:
sv_stds = []
# Find the standard in each detector
for det in self.multi_det:
stds = [sobj for sobj in self.std_specobjs if sobj.det == det]
if len(stds) == 0:
debugger.set_trace()
idx = flux.find_standard(stds)
sv_stds.append(stds[idx])
msgs.info("Using standard {} for det={}".format(stds[idx], det))
# Now splice
msgs.info("Splicing the standards -- The name will be for the first detector")
std_splice = sv_stds[0].copy()
# Append
for ostd in sv_stds[1:]:
try:
std_splice.optimal['WAVE_GRID'] = np.append(std_splice.optimal['WAVE_GRID'].value,
ostd.optimal['WAVE_GRID'].value) * units.AA
except KeyError:
std_splice.optimal['WAVE'] = np.append(std_splice.optimal['WAVE'].value,
ostd.optimal['WAVE'].value) * units.AA
for key in ['COUNTS', 'COUNTS_IVAR']:
std_splice.optimal[key] = np.append(std_splice.optimal[key], ostd.optimal[key])
self.std = std_splice
elif self.spectrograph.pypeline == 'Echelle':
# Find brightest object in each order
std_brightest = self.std_specobjs[flux.find_standard(self.std_specobjs)]
std_objid = std_brightest['idx'].split('-')[0]
self.std_idx = np.zeros(len(self.std_specobjs), dtype=bool)
for ii in range(len(self.std_specobjs)):
if std_objid in self.std_specobjs[ii]['idx']:
self.std_idx[ii] = True
# Set internal
self.std = self.std_specobjs[self.std_idx]
# Step
self.steps.append(inspect.stack()[0][3])
# Return
return self.std
else:
# Find brightest object in the exposures
# Searches over all slits (over all detectors), and all objects
self.std_idx = flux.find_standard(self.std_specobjs)
# Set internal
self.std = self.std_specobjs[self.std_idx]
# Step
self.steps.append(inspect.stack()[0][3])
# Return
return self.std
def generate_sensfunc(self):
"""
Dummy method for generating sensfunc. Overloaded by class specific sensfunc generator.
Returns:
"""
return None
def flux_science(self, sci_file):
"""
Dummy method for fluxing. Overloaded by class specific fluxing
Returns
"""
return
def _set_std_obj(self, obj_id=None):
"""
Method which allows the user to identify the standard star
with an input
Parameters
----------
obj_id : str or int
If str, object id of the spectrum
If int, index in the internal std_specobjs list
Returns
-------
self.std : SpecObj
"""
# From parameters?
if obj_id is None:
obj_id = self.par['std_obj_id']
#
if self.std_specobjs is None:
msgs.warn("You need to load in the Standard spectra first!")
return None
#
if isinstance(obj_id, str):
names = [spobj.idx for spobj in self.std_specobjs]
self.std_idx = names.index(obj_id)
elif isinstance(obj_id, int): # Extension number
self.std_idx = obj_id-1
self.std = self.std_specobjs[self.std_idx]
# Step
self.steps.append(inspect.stack()[0][3])
# Return
return self.std
def save_sens_dict(self, sens_dict, outfile):
"""
Save the sens_dict. Wrapper for save.save_sens_dict
Args:
sens_dict:
outfile:
Returns:
"""
save.save_sens_dict(sens_dict, outfile)
def load_sens_dict(self, filename):
self.sens_dict = load.load_sens_dict(filename)
return self.sens_dict
# TODO Need to improve this QA, it is really not informative
    # ToDo: either make it a dummy function or make it work for both multislit and echelle.
def show_sensfunc(self):
"""
Plot the sensitivity function
"""
if self.sens_dict is None:
msgs.warn("You need to generate the sens_dict first!")
return None
# Generate from model
#wave = np.linspace(self.sens_dict['wave_min'], self.sens_dict['wave_max'], 1000)
#mag_func = utils.func_val(self.sens_dict['c'], wave, self.sens_dict['func'])
#sens = 10.0**(0.4*mag_func)
# Plot
debugger.plot1d(self.sens_dict['wave'], self.sens_dict['sensfunc'], xlbl='Wavelength', ylbl='Sensitivity Function')
def write_science(self, outfile):
"""
Write the flux-calibrated science spectra
Parameters
----------
outfile : str
Returns
-------
"""
if len(self.sci_specobjs) == 0:
msgs.warn("No science spectra to write to disk!")
#
if 'VEL-TYPE' in self.sci_header.keys():
helio_dict = dict(refframe=self.sci_header['VEL-TYPE'],
vel_correction=self.sci_header['VEL'])
else:
helio_dict = None
# KLUDGE ME
if isinstance(self.sci_specobjs, list):
specObjs = specobjs.SpecObjs(self.sci_specobjs)
elif isinstance(self.sci_specobjs, specobjs.SpecObjs):
specObjs = self.sci_specobjs
else:
msgs.error("BAD INPUT")
#save_1d_spectra_fits(specObjs, header, spectrograph, outfile, helio_dict=None, overwrite=True,update_det=None)
save.save_1d_spectra_fits(specObjs, self.sci_header, self.spectrograph, outfile,
helio_dict=helio_dict,overwrite=True)
# Step
self.steps.append(inspect.stack()[0][3])
def __repr__(self):
# Generate sets string
txt = '<{:s}: '.format(self.__class__.__name__)
if len(self.steps) > 0:
txt+= ' steps: ['
for step in self.steps:
txt += '{:s}, '.format(step)
txt = txt[:-2]+']' # Trim the trailing comma
txt += '>'
return txt
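# A minimal usage sketch of this class (the file names and the `par` parset below are
# placeholders, not values defined in this module; instantiate_me is defined at the
# end of this module):
#   spectrograph = load_spectrograph('shane_kast_blue')
#   fspec = instantiate_me(spectrograph, par)
#   fspec.load_objs('spec1d_std.fits', std=True)    # load the standard star spectra
#   fspec.find_standard()
#   fspec.generate_sensfunc()
#   fspec.flux_science('spec1d_sci.fits')           # flux-calibrate the science spectra
#   fspec.write_science('spec1d_sci_flux.fits')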
class MultiSlit(FluxSpec):
"""
Child of FluxSpec for Multislit and Longslit reductions
"""
def __init__(self, spectrograph, par, **kwargs):
super(MultiSlit, self).__init__(spectrograph, par, **kwargs)
def generate_sensfunc(self):
"""
        Generate the sensitivity function
Wrapper to flux.generate_sensfunc
Requires self.std has been set
Returns
-------
self.sensfunc : dict
"""
# Check internals
if self.std is None:
            msgs.warn('Identify the standard star first (with find_standard).')
return None
if self.std_header is None:
msgs.warn('First set std_header with a dict-like object holding RA, DEC, '
'AIRMASS, EXPTIME.')
return None
self.sens_dict = {}
try:
this_wave = self.std.optimal['WAVE_GRID']
except KeyError:
this_wave = self.std.optimal['WAVE']
sens_dict_long = flux.generate_sensfunc(this_wave,
self.std.optimal['COUNTS'],
self.std.optimal['COUNTS_IVAR'],
self.std_header['AIRMASS'],
self.std_header['EXPTIME'],
self.spectrograph.telescope['longitude'],
self.spectrograph.telescope['latitude'],
BALM_MASK_WID=self.par['balm_mask_wid'],
telluric=self.telluric,
ra=self.std_ra,
dec=self.std_dec,
std_file = self.std_file,
debug=self.debug)
self.sens_dict['0'] = sens_dict_long
self.sens_dict['nslits'] = 1
# Step
self.steps.append(inspect.stack()[0][3])
# Return
return self.sens_dict
def flux_science(self, sci_file):
"""
Flux the internal list of sci_specobjs
Wrapper to flux.apply_sensfunc()
Returns
-------
"""
# Load
self.load_objs(sci_file, std=False)
# Run
for sci_obj in self.sci_specobjs:
flux.apply_sensfunc(sci_obj, self.sens_dict['0'], self.sci_header['AIRMASS'],
self.sci_header['EXPTIME'], telluric_correct=self.par['telluric_correct'],
extinct_correct=self.par['extinct_correct'],
longitude=self.spectrograph.telescope['longitude'],
latitude=self.spectrograph.telescope['latitude'])
self.steps.append(inspect.stack()[0][3])
class Echelle(FluxSpec):
"""
Child of FluxSpec for Echelle reductions
"""
def __init__(self, spectrograph, par, **kwargs):
super(Echelle, self).__init__(spectrograph, par, **kwargs)
# Echelle key
# TODO add these to the parameters to the parset or try to get rid of these parameters in flux.py
self.resolution = 3000. #par['resolution']
self.nresln = 10.0 #par['nresln']
def generate_sensfunc(self):
"""
        Generate the sensitivity function
Wrapper to flux.generate_sensfunc
Requires self.std has been set
Returns
-------
self.sensfunc : dict
"""
# Check internals
if self.std is None:
            msgs.warn('Identify the standard star first (with find_standard).')
return None
if self.std_header is None:
msgs.warn('First set std_header with a dict-like object holding RA, DEC, '
'AIRMASS, EXPTIME.')
return None
ext_final = fits.getheader(self.par['std_file'], -1)
norder = ext_final['ECHORDER'] + 1
self.sens_dict = {}
for iord in range(norder):
std_specobjs, std_header = load.load_specobjs(self.par['std_file'], order=iord)
std_idx = flux.find_standard(std_specobjs)
std = std_specobjs[std_idx]
try:
wavemask = std.optimal['WAVE_GRID'] > 0.0 #*units.AA
except KeyError:
wavemask = std.optimal['WAVE'] > 1000.0 * units.AA
this_wave = std.optimal['WAVE'][wavemask]
else:
this_wave = std.optimal['WAVE_GRID'][wavemask]
counts, ivar = std.optimal['COUNTS'][wavemask], std.optimal['COUNTS_IVAR'][wavemask]
sens_dict_iord = flux.generate_sensfunc(this_wave, counts, ivar,
float(self.std_header['AIRMASS']),
self.std_header['EXPTIME'],
self.spectrograph.telescope['longitude'],
self.spectrograph.telescope['latitude'],
star_type=self.star_type,
star_mag=self.star_mag,
telluric=self.telluric, ra=self.std_ra, dec=self.std_dec,
resolution=self.resolution,
BALM_MASK_WID=self.BALM_MASK_WID, std_file=self.std_file,
poly_norder=self.poly_norder,
polycorrect=self.polycorrect, debug=self.debug)
sens_dict_iord['ech_orderindx'] = iord
self.sens_dict[str(iord)] = sens_dict_iord
## add some keys to be saved into primary header in masterframe
for key in ['wave_max', 'exptime', 'airmass', 'std_file', 'std_ra', 'std_dec',
'std_name', 'cal_file']:
try:
self.sens_dict[key] = sens_dict_iord[key]
except:
pass
self.sens_dict['meta'] = {}
self.sens_dict['meta']['nslits'] = norder
self.sens_dict['wave_min'] = self.sens_dict['0']['wave_min']
# Step
self.steps.append(inspect.stack()[0][3])
# Return
return self.sens_dict
def flux_science(self, sci_file):
"""
Flux the internal list of sci_specobjs
Wrapper to flux.apply_sensfunc()
Returns
-------
"""
# Load
self.load_objs(sci_file, std=False)
# Run
norder = self.sens_dict['meta']['nslits']
for iord in range(norder):
sens_dict_iord = self.sens_dict[str(iord)]
for sci_obj in self.sci_specobjs:
if sci_obj.ech_orderindx == iord:
flux.apply_sensfunc(sci_obj, sens_dict_iord, float(self.sci_header['AIRMASS']),
self.sci_header['EXPTIME'], extinct_correct=self.par['extinct_correct'],
longitude=self.spectrograph.telescope['longitude'],
latitude=self.spectrograph.telescope['latitude'])
self.steps.append(inspect.stack()[0][3])
def instantiate_me(spectrograph, par, **kwargs):
"""
Instantiate the FluxSpec subclass appropriate for the provided spectrograph.
The class must be subclassed from FluxSpec. See :class:`FluxSpec` for
the description of the valid keyword arguments.
Args:
spectrograph : Spectrograph or str
Name of the spectrograph, e.g. shane_kast_blue
Used only to set settings for calls to the Class outside of PypeIt
            This includes extinction data.
par: FluxCalib parset
sens_file : str, optional
Filename of a sensitivity function file to be input
Returns:
        :class:`FluxSpec`: One of the classes with :class:`FluxSpec` as its
        base.
"""
indx = [ c.__name__ == spectrograph.pypeline for c in FluxSpec.__subclasses__() ]
if not np.any(indx):
msgs.error('Pipeline {0} is not defined!'.format(spectrograph.pypeline))
return FluxSpec.__subclasses__()[np.where(indx)[0][0]](spectrograph, par, **kwargs) | gpl-3.0 |
ChanChiChoi/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
chrjxj/zipline | zipline/assets/futures.py | 9 | 5290 | from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(as_of_date)
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
dt = self._get_datetime()
if (self._last_updated is None) or (self._last_updated != dt):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date,
dt
)
self._last_updated = dt
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
dt : datetime.datetime or pandas.Timestamp or str, optional
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=dt
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
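# Minimal usage sketch (the root symbol and dates are illustrative; inside a
# TradingAlgorithm the asset_finder and get_datetime come from the algorithm instance):
#   chain = FutureChain(asset_finder, get_datetime, root_symbol='CL')
#   first_contract = chain[0]                      # contracts can be indexed/iterated
#   shifted = chain.offset('30 days')              # chain rooted 30 days ahead
#   dated = chain.as_of(Timestamp('2015-01-05', tz='UTC'))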
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/groupby/test_groupby_subclass.py | 1 | 2289 | from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"obj",
[
tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
tm.SubclassedSeries(np.arange(0, 10), name="A"),
],
)
def test_groupby_preserves_subclass(obj, groupby_func):
# GH28330 -- preserve subclass through groupby operations
if isinstance(obj, Series) and groupby_func in {"corrwith"}:
pytest.skip("Not applicable")
grouped = obj.groupby(np.arange(0, 10))
# Groups should preserve subclass type
assert isinstance(grouped.get_group(0), type(obj))
args = []
if groupby_func in {"fillna", "nth"}:
args.append(0)
elif groupby_func == "corrwith":
args.append(obj)
elif groupby_func == "tshift":
args.extend([0, 0])
result1 = getattr(grouped, groupby_func)(*args)
result2 = grouped.agg(groupby_func, *args)
# Reduction or transformation kernels should preserve type
slices = {"ngroup", "cumcount", "size"}
if isinstance(obj, DataFrame) and groupby_func in slices:
assert isinstance(result1, obj._constructor_sliced)
else:
assert isinstance(result1, type(obj))
# Confirm .agg() groupby operations return same results
if isinstance(result1, DataFrame):
tm.assert_frame_equal(result1, result2)
else:
tm.assert_series_equal(result1, result2)
@pytest.mark.parametrize(
"obj", [DataFrame, tm.SubclassedDataFrame],
)
def test_groupby_resample_preserves_subclass(obj):
# GH28330 -- preserve subclass through groupby.resample()
df = obj(
{
"Buyer": "Carl Carl Carl Carl Joe Carl".split(),
"Quantity": [18, 3, 5, 1, 9, 3],
"Date": [
datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0),
],
}
)
df = df.set_index("Date")
# Confirm groupby.resample() preserves dataframe type
result = df.groupby("Buyer").resample("5D").sum()
assert isinstance(result, obj)
| bsd-3-clause |
StefReck/Km3-Autoencoder | scripts/make_hists.py | 1 | 16975 | # -*- coding: utf-8 -*-
import h5py
import numpy as np
import km3pipe as kp
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ copied from elsewhere:
def get_primary_track_index(event_blob):
"""
Gets the index of the primary (neutrino) track.
Uses bjorkeny in order to get the primary track, since bjorkeny!=0 for the initial interacting neutrino.
:param kp.io.HDF5Pump.blob event_blob: HDF5Pump event blob.
:return: int primary index: Index of the primary track (=neutrino) in the 'McTracks' branch.
"""
bjorken_y_array = event_blob['bjorkeny']
primary_index = np.where(bjorken_y_array != 0.0)[0][0]
return primary_index
def get_event_data(event_blob, geo):
p = get_primary_track_index(event_blob)
# parse tracks [event_id, particle_type, energy, isCC, bjorkeny, dir_x/y/z, time]
event_id = event_blob['EventInfo'].event_id[0]
particle_type = event_blob['McTracks'][p].type
energy = event_blob['McTracks'][p].energy
is_cc = event_blob['McTracks'][p].is_cc
bjorkeny = event_blob['McTracks'][p].bjorkeny
dir_x = event_blob['McTracks'][p].dir[0]
dir_y = event_blob['McTracks'][p].dir[1]
dir_z = event_blob['McTracks'][p].dir[2]
time = event_blob['McTracks'][p].time
event_track = np.array([event_id, particle_type, energy, is_cc, bjorkeny, dir_x, dir_y, dir_z, time], dtype=np.float32)
# parse hits [x,y,z,time]
    # Modified code:
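    # Note: `hits` below refers to the module-level table loaded further down in
    # this script (file['hits']), not to a per-event selection from event_blob.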
pos_x = np.array(hits["pos_x"]).astype('float32')
pos_y = np.array(hits["pos_y"]).astype('float32')
pos_z = np.array(hits["pos_z"]).astype('float32')
time = np.array(hits["time"]).astype('float32')
ax = np.newaxis
event_hits = np.concatenate([pos_x[:, ax], pos_y[:, ax], pos_z[:, ax], time[:, ax]], axis=1)
# event_hits: 2D hits array for one event, event_track: 1D track array containing event information
return event_hits, event_track
def get_time_parameters(t, t_start_margin=0.15, t_end_margin=0.15):
"""
Gets the fundamental time parameters in one place for cutting a time residual.
Later on these parameters cut out a certain time span of events specified by t_start and t_end.
:param ndarray(ndim=1) t: time column of the event_hits array.
    :param float t_start_margin: defines the start time of the selected timespan with t_mean - t_start_margin * t_diff.
    :param float t_end_margin: defines the end time of the selected timespan with t_mean + t_end_margin * t_diff.
:return: float t_start, t_end: absolute start and end time that will be used for the later timespan cut.
Events in this timespan are accepted, others are rejected.
"""
t_min = np.amin(t)
t_max = np.amax(t)
t_diff = t_max - t_min
t_mean = t_min + 0.5 * t_diff
t_start = t_mean - t_start_margin * t_diff
t_end = t_mean + t_end_margin * t_diff
return t_start, t_end
#return t_min, t_max # for no time cut
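# Worked example of the time cut above (made-up numbers): for hit times spanning
# t_min = 0 ns to t_max = 1000 ns we get t_mean = 500 ns, and with the default
# margins of 0.15 the accepted window is [500 - 150, 500 + 150] = [350, 650] ns.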
def calculate_bin_edges(n_bins, fname_geo_limits):
"""
Calculates the bin edges for the later np.histogramdd actions based on the number of specified bins.
    This is performed in order to get the same bin size for each event regardless of whether all bins have a hit or not.
:param tuple n_bins: contains the desired number of bins for each dimension. [n_bins_x, n_bins_y, n_bins_z]
:param str fname_geo_limits: filepath of the .txt ORCA geometry file.
:return: ndarray(ndim=1) x_bin_edges, y_bin_edges, z_bin_edges: contains the resulting bin edges for each dimension.
"""
#print "Reading detector geometry in order to calculate the detector dimensions from file " + fname_geo_limits
geo = np.loadtxt(fname_geo_limits)
# derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]
geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)
#print 'Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits)
x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines
y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483
z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329
#calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges
return x_bin_edges, y_bin_edges, z_bin_edges
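# Example call (sketch, using the geometry file referenced elsewhere in this script):
# with n_bins = (11, 13, 18, 50) this returns 12, 14 and 19 bin edges in x, y and z;
# the time axis (n_bins[3]) is only binned later inside the histogram functions.
#   x_edges, y_edges, z_edges = calculate_bin_edges((11, 13, 18, 50),
#                                                   'Daten/ORCA_Geo_115lines.txt')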
def compute_4d_to_2d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_2d_hists): #, event_track, do2d_pdf):
"""
Computes 2D numpy histogram 'images' from the 4D data.
:param ndarray(ndim=2) event_hits: 2D array that contains the hits (_xyzt) data for a certain eventID. [positions_xyz, time]
:param ndarray(ndim=1) x_bin_edges: bin edges for the X-direction.
:param ndarray(ndim=1) y_bin_edges: bin edges for the Y-direction.
:param ndarray(ndim=1) z_bin_edges: bin edges for the Z-direction.
:param tuple n_bins: Contains the number of bins that should be used for each dimension (x,y,z,t).
:param list all_4d_to_2d_hists: contains all 2D histogram projections.
:param ndarray(ndim=2) event_track: contains the relevant mc_track info for the event in order to get a nice title for the pdf histos.
:param bool do2d_pdf: if True, generate 2D matplotlib pdf histograms.
:return: appends the 2D histograms to the all_4d_to_2d_hists list.
"""
x = event_hits[:, 0]
y = event_hits[:, 1]
z = event_hits[:, 2]
t = event_hits[:, 3]
# analyze time
t_start, t_end = get_time_parameters(t, t_start_margin=0.15, t_end_margin=0.15)
# create histograms for this event
hist_xy = np.histogram2d(x, y, bins=(x_bin_edges, y_bin_edges)) # hist[0] = H, hist[1] = xedges, hist[2] = yedges
hist_xz = np.histogram2d(x, z, bins=(x_bin_edges, z_bin_edges))
hist_yz = np.histogram2d(y, z, bins=(y_bin_edges, z_bin_edges))
hist_xt = np.histogram2d(x, t, bins=(x_bin_edges, n_bins[3]), range=((min(x_bin_edges), max(x_bin_edges)), (t_start, t_end)))
hist_yt = np.histogram2d(y, t, bins=(y_bin_edges, n_bins[3]), range=((min(y_bin_edges), max(y_bin_edges)), (t_start, t_end)))
hist_zt = np.histogram2d(z, t, bins=(z_bin_edges, n_bins[3]), range=((min(z_bin_edges), max(z_bin_edges)), (t_start, t_end)))
# Format in classical numpy convention: x along first dim (vertical), y along second dim (horizontal)
all_4d_to_2d_hists.append((np.array(hist_xy[0], dtype=np.uint8),
np.array(hist_xz[0], dtype=np.uint8),
np.array(hist_yz[0], dtype=np.uint8),
np.array(hist_xt[0], dtype=np.uint8),
np.array(hist_yt[0], dtype=np.uint8),
np.array(hist_zt[0], dtype=np.uint8)))
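# Resulting shapes (sketch, assuming n_bins = (11, 13, 18, 50) as used in main below):
# each tuple appended to all_4d_to_2d_hists holds six uint8 images with shapes
# (11, 13), (11, 18), (13, 18), (11, 50), (13, 50) and (18, 50) for the
# xy, xz, yz, xt, yt and zt projections respectively.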
def compute_4d_to_3d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_3d_hists):
"""
Computes 3D numpy histogram 'images' from the 4D data.
Careful: Currently, appending to all_4d_to_3d_hists takes quite a lot of memory (about 200MB for 3500 events).
In the future, the list should be changed to a numpy ndarray.
(Which unfortunately would make the code less readable, since an array is needed for each projection...)
:param ndarray(ndim=2) event_hits: 2D array that contains the hits (_xyzt) data for a certain eventID. [positions_xyz, time]
:param ndarray(ndim=1) x_bin_edges: bin edges for the X-direction.
:param ndarray(ndim=1) y_bin_edges: bin edges for the Y-direction.
:param ndarray(ndim=1) z_bin_edges: bin edges for the Z-direction.
:param tuple n_bins: Declares the number of bins that should be used for each dimension (x,y,z,t).
:param list all_4d_to_3d_hists: contains all 3D histogram projections.
:return: appends the 3D histograms to the all_4d_to_3d_hists list. [xyz, xyt, xzt, yzt, rzt]
"""
x = event_hits[:, 0:1]
y = event_hits[:, 1:2]
z = event_hits[:, 2:3]
t = event_hits[:, 3:4]
t_start, t_end = get_time_parameters(t, t_start_margin=0.15, t_end_margin=0.15)
#New:
#condition to filter for in xyz histogram
con=(t>t_start) & (t<t_end)
hist_xyz = np.histogramdd(event_hits[con[:,0], 0:3], bins=(x_bin_edges, y_bin_edges, z_bin_edges))
hist_xyt = np.histogramdd(np.concatenate([x, y, t], axis=1), bins=(x_bin_edges, y_bin_edges, n_bins[3]),
range=((min(x_bin_edges), max(x_bin_edges)), (min(y_bin_edges), max(y_bin_edges)), (t_start, t_end)))
hist_xzt = np.histogramdd(np.concatenate([x, z, t], axis=1), bins=(x_bin_edges, z_bin_edges, n_bins[3]),
range=((min(x_bin_edges), max(x_bin_edges)), (min(z_bin_edges), max(z_bin_edges)), (t_start, t_end)))
hist_yzt = np.histogramdd(event_hits[:, 1:4], bins=(y_bin_edges, z_bin_edges, n_bins[3]),
range=((min(y_bin_edges), max(y_bin_edges)), (min(z_bin_edges), max(z_bin_edges)), (t_start, t_end)))
# add a rotation-symmetric 3d hist
"""
r = np.sqrt(x * x + y * y)
rzt = np.concatenate([r, z, t], axis=1)
hist_rzt = np.histogramdd(rzt, bins=(n_bins[0], n_bins[2], n_bins[3]), range=((np.amin(r), np.amax(r)), (np.amin(z), np.amax(z)), (t_start, t_end)))
"""
all_4d_to_3d_hists.append((np.array(hist_xyz[0], dtype=np.uint8),
np.array(hist_xyt[0], dtype=np.uint8),
np.array(hist_xzt[0], dtype=np.uint8),
np.array(hist_yzt[0], dtype=np.uint8)))#, np.array(hist_rzt[0], dtype=np.uint8)))
def compute_4d_to_4d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_4d_hists):
"""
Computes 4D numpy histogram 'images' from the 4D data.
:param ndarray(ndim=2) event_hits: 2D array that contains the hits (_xyzt) data for a certain eventID. [positions_xyz, time]
:param ndarray(ndim=1) x_bin_edges: bin edges for the X-direction.
:param ndarray(ndim=1) y_bin_edges: bin edges for the Y-direction.
:param ndarray(ndim=1) z_bin_edges: bin edges for the Z-direction.
:param tuple n_bins: Declares the number of bins that should be used for each dimension (x,y,z,t).
:param list all_4d_to_4d_hists: contains all 4D histogram projections.
:return: appends the 4D histogram to the all_4d_to_4d_hists list. [xyzt]
"""
t = event_hits[:, 3:4]
t_start, t_end = get_time_parameters(t, t_start_margin=0.15, t_end_margin=0.15)
hist_xyzt = np.histogramdd(event_hits[:, 0:4], bins=(x_bin_edges, y_bin_edges, z_bin_edges, n_bins[3]),
range=((min(x_bin_edges),max(x_bin_edges)),(min(y_bin_edges),max(y_bin_edges)),
(min(z_bin_edges),max(z_bin_edges)),(t_start, t_end)))
all_4d_to_4d_hists.append(np.array(hist_xyzt[0], dtype=np.uint8))
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ end of copied section
file=h5py.File("Daten\example.h5", "r")
hits=file['hits']
pos_x = np.array(hits["pos_x"]).astype('float32')
pos_y = np.array(hits["pos_y"]).astype('float32')
pos_z = np.array(hits["pos_z"]).astype('float32')
time = np.array(hits["time"]).astype('float32')
event_id=np.array(hits["event_id"])
#Filter for events with high energy:
high_e=file['mc_tracks']["energy"] > 99
high_e_id=file['mc_tracks']["event_id"][high_e]
#Filter out a specific event from the 3500ish in the same file
target_event= high_e_id[0]
pos_x=pos_x[event_id==target_event]
pos_y=pos_y[event_id==target_event]
pos_z=pos_z[event_id==target_event]
time=time[event_id==target_event]
ax = np.newaxis
#Dimension: 2120 x 4: x,y,z,t
event_hits = np.concatenate([pos_x[:, ax], pos_y[:, ax], pos_z[:, ax], time[:, ax]], axis=1)
def compute_histograms():
filename_geo_limits = 'Daten\ORCA_Geo_115lines.txt'
n_bins=(11,13,18,50)
x_bin_edges, y_bin_edges, z_bin_edges = calculate_bin_edges( n_bins, filename_geo_limits)
all_4d_to_2d_hists = []
compute_4d_to_2d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_2d_hists)
#4 diagramme im array, jeweils 3d matrizen
all_4d_to_3d_hists = []
compute_4d_to_3d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_3d_hists)
#1 diagramm im array, 4d matrix
all_4d_to_4d_hists = []
compute_4d_to_4d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_4d_hists)
return all_4d_to_2d_hists, all_4d_to_3d_hists, all_4d_to_4d_hists
#all_4d_to_2d_hists, all_4d_to_3d_hists, all_4d_to_4d_hists = compute_histograms()
#np.save("Daten/test_hist_2d", all_4d_to_2d_hists)
#np.save("Daten/test_hist_3d", all_4d_to_3d_hists)
#np.save("Daten/test_hist_4d", all_4d_to_4d_hists)
def store_histograms_as_hdf5(hists, mc_infos, filepath_output):
    h5f = h5py.File(filepath_output, 'w')
    dset_hists = h5f.create_dataset('x', data=hists, dtype='uint8')
    dset_mc_infos = h5f.create_dataset('y', data=mc_infos, dtype='float32')
    h5f.close()
def main(n_bins, do2d=True, do2d_pdf=(False, 10), do3d=True, do4d=False, do_mc_hits=False, use_calibrated_file=True, data_cuts=None):
"""
Main code. Reads raw .hdf5 files and creates 2D/3D histogram projections that can be used for a CNN
:param tuple(int) n_bins: Declares the number of bins that should be used for each dimension (x,y,z,t).
:param bool do2d: Declares if 2D histograms should be created.
:param (bool, int) do2d_pdf: Declares if pdf visualizations of the 2D histograms should be created. Cannot be called if do2d=False.
The event loop will be stopped after the integer specified in the second argument.
:param bool do3d: Declares if 3D histograms should be created.
:param bool do4d: Declares if 4D histograms should be created.
:param bool do_mc_hits: Declares if hits (False, mc_hits + BG) or mc_hits (True) should be processed
:param bool use_calibrated_file: Declares if the input file is already calibrated (pos_x/y/z, time) or not.
:param dict data_cuts: Dictionary that contains information about any possible cuts that should be applied.
Supports the following cuts: 'triggered', 'energy_lower_limit'
"""
if data_cuts is None: data_cuts={'triggered': False, 'energy_lower_limit': 0}
filename_geo_limits = 'Daten/ORCA_Geo_115lines.txt' # used for calculating the dimensions of the ORCA can
x_bin_edges, y_bin_edges, z_bin_edges = calculate_bin_edges(n_bins, filename_geo_limits)
all_4d_to_2d_hists, all_4d_to_3d_hists, all_4d_to_4d_hists = [], [], []
mc_infos = []
# Initialize HDF5Pump of the input file
event_pump = kp.io.hdf5.HDF5Pump(filename="Daten\example.h5")
#print "Generating histograms from the hits in XYZT format for files based on " + filename_input
for i, event_blob in enumerate(event_pump):
print(i,event_blob)
if i % 10 == 0:
print ('Event No. ' + str(i))
# filter out all hit and track information belonging that to this event
geo=None
event_hits, event_track = get_event_data(event_blob, geo)
if event_track[2] < data_cuts['energy_lower_limit']: # Cutting events with energy < threshold (default=0)
#print 'Cut an event with an energy of ' + str(event_track[2]) + ' GeV'
continue
# event_track: [event_id, particle_type, energy, isCC, bjorkeny, dir_x/y/z, time]
mc_infos.append(event_track)
if do2d:
            compute_4d_to_2d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_2d_hists)
if do3d:
compute_4d_to_3d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_3d_hists)
if do4d:
            compute_4d_to_4d_histograms(event_hits, x_bin_edges, y_bin_edges, z_bin_edges, n_bins, all_4d_to_4d_hists)
if do2d_pdf[0] is True:
if i >= do2d_pdf[1]:
glob.pdf_2d_plots.close()
break
if do3d:
store_histograms_as_hdf5(np.stack([hist_tuple[0] for hist_tuple in all_4d_to_3d_hists]), np.array(mc_infos), 'Daten/xyz.h5')
store_histograms_as_hdf5(np.stack([hist_tuple[1] for hist_tuple in all_4d_to_3d_hists]), np.array(mc_infos), 'Daten/xyt.h5')
store_histograms_as_hdf5(np.stack([hist_tuple[2] for hist_tuple in all_4d_to_3d_hists]), np.array(mc_infos), 'Daten/xzt.h5')
store_histograms_as_hdf5(np.stack([hist_tuple[3] for hist_tuple in all_4d_to_3d_hists]), np.array(mc_infos), 'Daten/yzt.h5')
main(n_bins=(11,13,18,50))
| mit |
AaltoML/kalman-jax | kalmanjax/notebooks/comparison.py | 1 | 3827 | import sys
sys.path.insert(0, '../')
import numpy as np
from jax.nn import softplus
from jax.experimental import optimizers
import matplotlib.pyplot as plt
import time
from sde_gp import SDEGP
import approximate_inference as approx_inf
import priors
import likelihoods
pi = 3.141592653589793
print('generating some data ...')
np.random.seed(99)
N = 1000 # number of training points
x = 100 * np.random.rand(N)
f = 6 * np.sin(pi * x / 10.0) / (pi * x / 10.0 + 1)
y_ = f + np.math.sqrt(0.05)*np.random.randn(x.shape[0])
y = np.maximum(np.sign(y_), 0.)
x_test = np.linspace(np.min(x)-10.0, np.max(x)+10.0, num=500)
var_f = 1.0 # GP variance
len_f = 5.0 # GP lengthscale
prior = priors.Matern52(variance=var_f, lengthscale=len_f)
lik = likelihoods.Probit()
approx_inf_1 = approx_inf.EP()
approx_inf_2 = approx_inf.VI()
model_1 = SDEGP(prior=prior, likelihood=lik, t=x, y=y, t_test=x_test, approx_inf=approx_inf_1)
model_2 = SDEGP(prior=prior, likelihood=lik, t=x, y=y, t_test=x_test, approx_inf=approx_inf_2)
opt_init, opt_update, get_params = optimizers.adam(step_size=5e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model_1.prior.hyp, model_1.likelihood.hyp])
def gradient_step(i, state, mod):
params = get_params(state)
mod.prior.hyp = params[0]
mod.likelihood.hyp = params[1]
neg_log_marg_lik, gradients = mod.run()
# neg_log_marg_lik, gradients = mod.run_two_stage() # <-- less elegant but reduces compile time
print('iter %2d: var_f=%1.2f len_f=%1.2f, nlml=%2.2f' %
(i, softplus(params[0][0]), softplus(params[0][1]), neg_log_marg_lik))
return opt_update(i, gradients, state)
# print('optimising the hyperparameters ...')
# t0 = time.time()
# for j in range(20):
# opt_state = gradient_step(j, opt_state, sde_gp_model_1)
# t1 = time.time()
# print('optimisation time: %2.2f secs' % (t1-t0))
for i in range(5):
model_1.run()
model_2.run()
# calculate posterior predictive distribution via filtering and smoothing at train & test locations:
print('calculating the posterior predictive distribution ...')
t0 = time.time()
posterior_mean_1, posterior_var_1, _, nlpd1 = model_1.predict()
posterior_mean_2, posterior_var_2, _, nlpd2 = model_2.predict()
t1 = time.time()
print('prediction time: %2.2f secs' % (t1-t0))
print(model_1.sites.site_params[0][100] - model_2.sites.site_params[0][100])
print(posterior_mean_1 - posterior_mean_2)
lb_1 = posterior_mean_1[:, 0] - 1.96 * posterior_var_1[:, 0]**0.5
ub_1 = posterior_mean_1[:, 0] + 1.96 * posterior_var_1[:, 0]**0.5
lb_2 = posterior_mean_2[:, 0] - 1.96 * posterior_var_2[:, 0]**0.5
ub_2 = posterior_mean_2[:, 0] + 1.96 * posterior_var_2[:, 0]**0.5
x_pred = model_1.t_all
test_id = model_1.test_id
t_test = model_1.t_all[test_id]
link_fn = model_1.likelihood.link_fn
# print('sampling from the posterior ...')
# t0 = time.time()
# posterior_samp = sde_gp_model_1.posterior_sample(20)
# t1 = time.time()
# print('sampling time: %2.2f secs' % (t1-t0))
print('plotting ...')
plt.figure(1, figsize=(12, 5))
plt.clf()
plt.plot(x, y, 'b+', label='observations')
plt.plot(x_pred, link_fn(posterior_mean_1[:, 0]), 'm', label='posterior mean')
plt.plot(x_pred, link_fn(posterior_mean_2[:, 0]), 'g', label='posterior mean')
# plt.fill_between(x_pred, link_fn(lb_1), link_fn(ub_1), color='m', alpha=0.05, label='95% confidence')
plt.plot(x_pred, link_fn(lb_1), color='m', alpha=0.3)
plt.plot(x_pred, link_fn(ub_1), color='m', alpha=0.3)
plt.plot(x_pred, link_fn(lb_2), color='g', alpha=0.3)
plt.plot(x_pred, link_fn(ub_2), color='g', alpha=0.3)
# plt.plot(sde_gp_model_1.t_test, link_fn(posterior_samp[test_id, 0, :]), 'm', alpha=0.15)
plt.xlim(t_test[0], t_test[-1])
plt.legend()
plt.title('GP classification via Kalman smoothing')
plt.xlabel('time - $t$')
plt.show()
| apache-2.0 |
mikeireland/opticstools | playground/metrology_fringes.py | 1 | 1858 | from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
if not '..' in sys.path:
sys.path.insert(0,'..')
import opticstools as ot
import scipy.ndimage as nd
#Let's start with some definitions. Put everything in length units of mm
mm_pix = 0.0025
sz = 4096 #Size in pixels.
ulens_diameter = 0.25
d_to_lens1 = 2.0
wave=800e-6
flength1 = 20.
cyl_flength = 1
flength2 = 20.
#-----
one_aperture = ot.utils.circle(sz, ulens_diameter/mm_pix, interp_edge=True)
lens1 = ot.curved_wf(sz, mm_pix, wave=wave, f_length=flength1)
initial_wf = nd.interpolation.shift(one_aperture, [-3*ulens_diameter/mm_pix,0]) + \
nd.interpolation.shift(one_aperture, [3*ulens_diameter/mm_pix,0]) + nd.interpolation.shift(one_aperture, [1*ulens_diameter/mm_pix,0])
wf_at_sph_lens = ot.propagate_by_fresnel(initial_wf, mm_pix, d_to_lens1, wave)
wf_at_cyl_lens = ot.propagate_by_fresnel(wf_at_sph_lens*lens1, mm_pix, flength1-cyl_flength, wave)
print("Made it to cylindrical lens!")
#Now the painful bit: create a cylindrical lens.
x = np.arange(sz) - sz//2
xy = np.meshgrid(x,x)
power = 1.0/cyl_flength
phase = 0.5*mm_pix**2/wave*power*xy[0]**2
cyl_wf=np.exp(2j*np.pi*phase)
#Find the final fringes!
image_plane_E = ot.propagate_by_fresnel(wf_at_cyl_lens*cyl_wf, mm_pix, cyl_flength, wave)
print("Made it to image plane!")
plt.figure(1)
plt.clf()
plt.imshow(np.abs(image_plane_E))
#for i in range(20):
# plt.clf()
# wf = ot.propagate_by_fresnel(wf_at_sph_lens*lens1, mm_pix, i*0.5, wave)
# plt.imshow(np.abs(wf))
# plt.pause(.01)
wf_at_lens1 = ot.propagate_by_fresnel(image_plane_E, mm_pix, flength2, wave)
lens2 = ot.curved_wf(sz, mm_pix, wave=wave, f_length=flength2)
wf_at_grating = ot.propagate_by_fresnel(wf_at_lens1*lens2, mm_pix, flength2, wave)
plt.figure(2)
plt.clf()
plt.imshow(np.abs(wf_at_grating)) | mit |
ThomasMiconi/nupic.research | projects/associative_network/run_hopfield_network_experiment.py | 11 | 15603 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Experiment with associative network that uses SDRs
Two experiments are included in this script
1. Capacity experiment: How many unique items can a network store such that
each item can be reliably retrieved?
2. Simultaneously retrieve multiple items by relaxing the sparsity
"""
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
plt.ion()
plt.close('all')
class hyperColumnNetwork(object):
def __init__(self,
numHyperColumn,
numNeuronPerHyperColumn,
numActiveNeuronPerHyperColumn,
numInputs,
minThreshold=0,
matchThreshold=10):
self.numHyperColumn = numHyperColumn
self.numNeuronPerHyperColumn = numNeuronPerHyperColumn
self.numActiveNeuronPerHyperColumn = numActiveNeuronPerHyperColumn
self.numInputs = numInputs
self.minThreshold = minThreshold
self.matchThreshold = matchThreshold
self.numNeuronTotal = numHyperColumn * numNeuronPerHyperColumn
# initialize weight matrix
self.weightFF = np.eye(self.numNeuronTotal, numInputs)
self.weightRecurrent = np.zeros((self.numNeuronTotal, self.numNeuronTotal))
def initializeObjectSDRs(self, numObjects, seed=1):
# initialize object SDRs in HC
np.random.seed(seed)
objectSDRActiveBits = []
for i in range(numObjects):
objectSDRActiveBits.append([])
for j in range(self.numHyperColumn):
randomCells = np.random.permutation(range(self.numNeuronPerHyperColumn))
objectSDRActiveBits[i].append(
randomCells[:self.numActiveNeuronPerHyperColumn])
return objectSDRActiveBits
def memorizeObjectSDRs(self, objectSDRActiveBits):
numObjects = len(objectSDRActiveBits)
# initialize recurrent connections
self.weightRecurrent = np.zeros((self.numNeuronTotal, self.numNeuronTotal))
for i in range(numObjects):
offset = 0
objectSDR = np.zeros((self.numNeuronTotal, 1))
for j in range(self.numHyperColumn):
objectSDR[offset+objectSDRActiveBits[i][j], 0] = 1
offset += self.numNeuronPerHyperColumn
self.weightRecurrent += np.dot(objectSDR, np.transpose(objectSDR))
for i in range(self.numNeuronTotal):
self.weightRecurrent[i, i] = 0
def run(self, initialState, feedforwardInputs):
"""
Run network for multiple steps
:param initialState:
:param feedforwardInputs: list of feedforward inputs
:return: list of active cell indices over time
"""
currentState = initialState
activeStateHistory = [np.where(initialState > 0)[0]]
numStep = len(feedforwardInputs)
for i in range(numStep):
currentState = self.runSingleStep(currentState,
feedforwardInputs[i])
activeStateHistory.append([np.where(currentState > 0)[0]])
return activeStateHistory
def runSingleStep(self,
previousState,
feedforwardInputs):
"""
Run network for one step
:param previousState: a (Ncell, 1) numpy array of network states
    :param feedforwardInputs: a (Ncell, 1) numpy array of feedforward input to the network
:return: newState
"""
print "previous activeCells ", np.sort(np.where(previousState>0)[0])
feedforwardInputOverlap = np.dot(self.weightFF, feedforwardInputs)
lateralInputOverlap = np.dot(self.weightRecurrent, previousState)
totalInput = feedforwardInputOverlap + lateralInputOverlap
print "feedforwardInputOverlap: ", np.sort(np.where(feedforwardInputOverlap>0)[0])
# cells with active feedforward zone
feedforwardActive = feedforwardInputOverlap > self.minThreshold
# cells with active distal zone (that receives lateral connections)
lateralActive = lateralInputOverlap > self.minThreshold
# cells with both active feedforward zone and lateral zone
strongActive = np.logical_and(feedforwardActive, lateralActive)
newState = np.zeros((self.numNeuronTotal, 1))
offset = 0
for i in range(self.numHyperColumn):
numberOfStrongActiveCellsInColumn = np.sum(
strongActive[offset:offset+self.numNeuronPerHyperColumn])
print "numberOfStrongActiveCellsInColumn: ", numberOfStrongActiveCellsInColumn
if numberOfStrongActiveCellsInColumn > self.matchThreshold:
self.numActiveNeuronPerHyperColumn = self.numActiveNeuronPerHyperColumn/2
w = self.numActiveNeuronPerHyperColumn
cellIdx = np.argsort(totalInput[offset:offset+self.numNeuronPerHyperColumn, 0])
activeCells = cellIdx[-w:] + offset
activeCells = activeCells[np.where(
totalInput[activeCells] > self.minThreshold)[0]]
newState[activeCells] = 1
print "activeCells ", np.sort(activeCells)
offset += self.numNeuronPerHyperColumn
return newState
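# Minimal usage sketch of the network above (the sizes mirror runSingleExperiment
# further below; the seed and object count are illustrative):
#   hcNet = hyperColumnNetwork(numHyperColumn=1, numNeuronPerHyperColumn=1024,
#                              numActiveNeuronPerHyperColumn=20, numInputs=1024)
#   objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=100, seed=42)
#   hcNet.memorizeObjectSDRs(objectSDRActiveBits)        # store SDRs via summed outer products
#   initialState = np.zeros((hcNet.numNeuronTotal, 1))   # seed with a (possibly noisy) cue
#   feedforwardInputs = [np.zeros((hcNet.numNeuronTotal, 1))] * 5
#   history = hcNet.run(initialState, feedforwardInputs) # active cell indices per step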
def convertActiveCellsToSDRs(activeStateHistory, numCells):
"""
Convert list of active cell indices to a list of SDRs
:param activeStateHistory: list of active cell indices per step
:param numCells: total number of cells
:return: sdrHistory numpy array of (numStep, numCells)
"""
numStep = len(activeStateHistory)
sdrHistory = np.zeros((numStep, numCells))
for i in range(numStep):
sdrHistory[i, activeStateHistory[i]] = 1
return sdrHistory
def stripSDRHistoryForDisplay(sdrHistory, removePortion=0.5):
"""
Strip SDR History (remove unused bits) for display purpose
:param sdrHistory:
:return: displayBitIndex
"""
sdrHistorySum = np.sum(sdrHistory, axis=0)
unusedBitIndices = np.where(sdrHistorySum == 0)[0]
usedBitIndices = np.where(sdrHistorySum > 1)[0]
numUnusedBitKeep = int(len(unusedBitIndices) * (1-removePortion))
unusedBitIndices = np.random.permutation(unusedBitIndices)
unusedBitIndices = unusedBitIndices[:numUnusedBitKeep]
displayBitIndex = np.concatenate((usedBitIndices, unusedBitIndices))
displayBitIndex = np.sort(displayBitIndex)
return displayBitIndex
def generateSDRforDisplay(numNeuron, activeBits, displayBitIndex):
sdrForDisplay = np.zeros((1, numNeuron))
sdrForDisplay[0, activeBits] = 1
sdrForDisplay = np.matlib.repmat(sdrForDisplay[:, displayBitIndex], 10, 1)
return sdrForDisplay
def runSingleExperiment(numObjects, numBitNoise, seed=10):
np.random.seed(seed)
hcNet = hyperColumnNetwork(numHyperColumn=1,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024)
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=seed)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
objectIDTest = np.random.choice(numObjects, 100)
finalOverlapList = []
for objectID in objectIDTest:
initialState = np.zeros((hcNet.numNeuronTotal, 1))
randomCells = np.random.permutation(range(hcNet.numNeuronTotal))
initialState[objectSDRActiveBits[objectID][0][:(20-numBitNoise)]] = 1
initialState[randomCells[:numBitNoise]] = 1
feedforwardInputs = [np.zeros((hcNet.numNeuronTotal, 1))] * 5
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
initialActiveCells = np.where(sdrHistory[0, :] > 0)[0]
finalActiveCells = np.where(sdrHistory[-1, :] > 0)[0]
finalOverlap = len(
set(objectSDRActiveBits[objectID][0]).intersection(finalActiveCells))
initialOverlap = len(
set(objectSDRActiveBits[objectID][0]).intersection(initialActiveCells))
finalOverlapList.append(finalOverlap)
return finalOverlapList
def capacityExperiment():
numObjectList = np.linspace(start=100, stop=2000, num=10).astype('int')
numBitNoiseList = [2, 4, 8, 10, 15]
numRpts = 3
avgFinalOverlap = np.zeros(
(numRpts, len(numBitNoiseList), len(numObjectList)))
for i in range(len(numBitNoiseList)):
for j in range(len(numObjectList)):
for rpt in range(3):
print "run experiment with object # {} noise # {} rpt {}".format(
numObjectList[j], numBitNoiseList[i], rpt
)
finalOverlap = runSingleExperiment(numObjectList[j],
numBitNoiseList[i], seed=rpt)
avgFinalOverlap[rpt, i, j] = (np.mean(finalOverlap))
plt.figure()
finalOverlaps = np.mean(avgFinalOverlap, 0)
legendList = []
for i in range(len(numBitNoiseList)):
plt.plot(numObjectList, finalOverlaps[i, :])
legendList.append("noise = {}".format(numBitNoiseList[i]))
plt.legend(legendList)
plt.plot([140, 140], [0, 20], 'k--')
plt.xlabel('Number of Object')
plt.ylabel('Overlap(retrieved sdr, original sdr)')
plt.savefig('capacity_experiment_result.pdf')
def retrieveMultipleItems():
hcNet = hyperColumnNetwork(numHyperColumn=1,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024,
minThreshold=0)
numObjects = 100
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=42)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
hcNet.numActiveNeuronPerHyperColumn = 40
objectID1 = 1
objectID2 = 2
ambiguousInput = np.zeros((hcNet.numNeuronTotal, 1))
ambiguousInput[objectSDRActiveBits[objectID1][0][:10]] = 10
ambiguousInput[objectSDRActiveBits[objectID2][0][:10]] = 10
nStep = 20
feedforwardInputs = [ambiguousInput]
for i in range(1, nStep):
feedforwardInputs.append(np.zeros((hcNet.numNeuronTotal, 1)))
feedforwardInputs[10][objectSDRActiveBits[objectID1][0]] = 1
initialState = np.zeros((hcNet.numNeuronTotal, 1))
# initialState = ambiguousInput
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
displayBitIndex = stripSDRHistoryForDisplay(sdrHistory, removePortion=0.9)
initialActiveCells = np.where(sdrHistory[0, :] > 0)[0]
print initialActiveCells
finalActiveCells = np.where(sdrHistory[-1, :] > 0)[0]
initialOverlap1 = len(
set(objectSDRActiveBits[objectID1][0]).intersection(initialActiveCells))
initialOverlap2 = len(
set(objectSDRActiveBits[objectID2][0]).intersection(initialActiveCells))
finalOverlap1 = len(
set(objectSDRActiveBits[objectID1][0]).intersection(finalActiveCells))
finalOverlap2 = len(
set(objectSDRActiveBits[objectID2][0]).intersection(finalActiveCells))
print "Initial overlap with object SDR 1: {}".format(initialOverlap1)
print "Initial overlap with object SDR 2: {}".format(initialOverlap2)
print "Final overlap with object SDR 1: {}".format(finalOverlap1)
print "Final overlap with object SDR 2: {}".format(finalOverlap2)
fig, ax = plt.subplots(nrows=4, ncols=1)
object1SDR = generateSDRforDisplay(hcNet.numNeuronTotal,
objectSDRActiveBits[objectID1],
displayBitIndex)
object2SDR = generateSDRforDisplay(hcNet.numNeuronTotal,
objectSDRActiveBits[objectID2],
displayBitIndex)
querySDR = np.matlib.repmat(np.transpose(ambiguousInput[displayBitIndex]), 10, 1)
ax[0].imshow(object1SDR, cmap='gray')
ax[0].set_title('SDR for Object A')
ax[1].imshow(object2SDR, cmap='gray')
ax[1].set_title('SDR for Object B')
ax[2].imshow(querySDR, cmap='gray')
ax[2].set_title('query SDR')
ax[3].imshow(sdrHistory[:, displayBitIndex], cmap='gray')
ax[3].set_title('Network states over time')
plt.savefig('figures/retrieveMultipleItems.pdf')
def multipleHyperColumn():
hcNet = hyperColumnNetwork(numHyperColumn=3,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024*3,
minThreshold=0)
numObjects = 10
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=40)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
initialState = np.zeros((hcNet.numNeuronTotal, 1))
objectID1 = 1
offset = 0
ambiguousInput = np.zeros((hcNet.numNeuronTotal, 1))
for i in range(hcNet.numHyperColumn):
if i != 1:
ambiguousInput[offset + objectSDRActiveBits[objectID1][i][:20]] = 1
# initialState[offset + objectSDRActiveBits[objectID2][i][:10]] = 1
offset += hcNet.numNeuronPerHyperColumn
nStep = 10
feedforwardInputs = [ambiguousInput]
for i in range(1, nStep):
feedforwardInputs.append(np.zeros((hcNet.numNeuronTotal, 1)))
hcNet.numActiveNeuronPerHyperColumn = 20
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
offset = 0
plt.figure()
fig, ax = plt.subplots(3, 3)
for i in range(hcNet.numHyperColumn):
activationColumnI = sdrHistory[:, offset:(offset+hcNet.numNeuronPerHyperColumn)]
initialOverlap1 = len(
set(objectSDRActiveBits[objectID1][i]).intersection(set(np.where(activationColumnI[0, :]>0)[0])))
finalOverlap1 = len(
set(objectSDRActiveBits[objectID1][i]).intersection(set(np.where(activationColumnI[-1, :]>0)[0])))
print "initial Overlap with column {} : {}".format(i, initialOverlap1)
print "final Overlap with column {} : {}".format(i, finalOverlap1)
displayBitIndex = stripSDRHistoryForDisplay(activationColumnI, removePortion=0.9)
displayBitIndex = displayBitIndex[:30]
object1SDR = generateSDRforDisplay(hcNet.numNeuronPerHyperColumn,
objectSDRActiveBits[objectID1][i],
displayBitIndex)
ffInput = np.zeros((nStep, hcNet.numNeuronPerHyperColumn))
for s in range(nStep):
ffInput[s, :] = feedforwardInputs[s][offset:(offset+hcNet.numNeuronPerHyperColumn)][:, 0]
ax[0, i].imshow(object1SDR, cmap='gray')
ax[0, i].set_title('Module {}'.format(i))
ax[1, i].imshow(ffInput[:, displayBitIndex], cmap='gray')
ax[1, i].set_ylabel('ffInput')
ax[2, i].imshow(activationColumnI[:, displayBitIndex], cmap='gray')
ax[2, i].set_ylabel('Network state')
offset += hcNet.numNeuronPerHyperColumn
plt.savefig('figures/experimentMultipleModules.pdf')
if __name__ == "__main__":
multipleHyperColumn()
| agpl-3.0 |
Ialong/shogun | examples/undocumented/python_modular/graphical/regression_lars.py | 26 | 3327 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RegressionLabels, RealFeatures
from modshogun import LeastAngleRegression, LinearRidgeRegression, LeastSquaresRegression
from modshogun import MeanSquaredError
# we compare LASSO with ordinary least squares (OLS)
# in the ideal case, the MSE of OLS should coincide
# with LASSO at the end of the path
#
# if OLS is unstable, we may use RidgeRegression (with
# a small regularization coefficient) to simulate OLS
use_ridge = False
np.random.seed(1024)
n = 200
ntrain = 100
p = 7
correlation = 0.6
mean = np.zeros(p)
cov = correlation*np.ones((p,p)) + (1-correlation)*np.eye(p)
Xall = np.random.multivariate_normal(mean, cov, n)
# model is the linear combination of the first three variables plus noise
yall = 2*Xall[:,0] + 5*Xall[:,1] + -3*Xall[:,2] + 0.5*np.random.randn(n)
X = Xall[0:ntrain,:]
y = yall[0:ntrain]
Xtest = Xall[ntrain:,:]
ytest = yall[ntrain:]
# preprocess data
for i in xrange(p):
X[:,i] -= np.mean(X[:,i])
X[:,i] /= np.linalg.norm(X[:,i])
y -= np.mean(y)
# train LASSO
LeastAngleRegression = LeastAngleRegression()
LeastAngleRegression.set_labels(RegressionLabels(y))
LeastAngleRegression.train(RealFeatures(X.T))
# train ordinary LSR
if use_ridge:
lsr = LinearRidgeRegression(0.01, RealFeatures(X.T), RegressionLabels(y))
lsr.train()
else:
lsr = LeastSquaresRegression()
lsr.set_labels(RegressionLabels(y))
lsr.train(RealFeatures(X.T))
# gather LASSO path
path = np.zeros((p, LeastAngleRegression.get_path_size()))
for i in xrange(path.shape[1]):
path[:,i] = LeastAngleRegression.get_w(i)
evaluator = MeanSquaredError()
# apply on training data
mse_train = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_train.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(X.T))
mse_train[i] = evaluator.evaluate(ypred, RegressionLabels(y))
ypred = lsr.apply(RealFeatures(X.T))
mse_train_lsr = evaluator.evaluate(ypred, RegressionLabels(y))
# apply on test data
mse_test = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_test.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(Xtest.T))
mse_test[i] = evaluator.evaluate(ypred, RegressionLabels(ytest))
ypred = lsr.apply(RealFeatures(Xtest.T))
mse_test_lsr = evaluator.evaluate(ypred, RegressionLabels(ytest))
fig = plt.figure()
ax_path = fig.add_subplot(1,2,1)
plt.plot(xrange(path.shape[1]), path.T, '.-')
plt.legend(['%d' % (x+1) for x in xrange(path.shape[0])])
plt.xlabel('iteration')
plt.title('LASSO path')
ax_tr = fig.add_subplot(2,2,2)
plt.plot(range(mse_train.shape[0])[1:], mse_train[1:], 'k.-')
plt.plot(range(mse_train.shape[0])[1:], np.zeros(mse_train.shape[0])[1:] + mse_train_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'))
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on training data')
ax_tt = fig.add_subplot(2,2,4)
plt.plot(range(mse_test.shape[0])[1:], mse_test[1:], 'k.-')
plt.plot(range(mse_test.shape[0])[1:], np.zeros(mse_test.shape[0])[1:] + mse_test_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'), loc='lower right')
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on test data')
plt.show()
| gpl-3.0 |
mariusvniekerk/ibis | ibis/config.py | 16 | 20779 | # This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
PY3 = (sys.version_info[0] >= 3)
if PY3:
def u(s):
return s
else:
def u(s):
return unicode(s, "unicode_escape")
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
# must at least 1 arg deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
buf = StringIO()
pprint.pprint(self.d, stream=buf)
return buf.getvalue()
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
# and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
'Need to invoke as '
'option_context(pat, val, [(pat, val), ...]).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
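# Illustrative usage sketch (not part of the original module): registering an
# option with a validator and then reading/writing it. The option name
# "display.precision" is a hypothetical example, not one that ibis defines here.
#
#   import ibis.config as cf
#   cf.register_option('display.precision', 6, 'number of digits to show',
#                      validator=cf.is_int)
#   cf.get_option('display.precision')     # -> 6
#   cf.set_option('display.precision', 4)
#   cf.options.display.precision           # attribute-style access -> 4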
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that of `rkey` is checked. If they
do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
if key is deprecated and a replacement key is defined, will return the
replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: register_option, get_option, set_option
Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option("size", " 6 pt")
cf.get_option("size")
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit into this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x, which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x, which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError("Value must be one of %s"
% str("|".join(pp_values)))
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
| apache-2.0 |
nivwusquorum/tensorflow-deepq | tf_rl/simulation/karpathy_game.py | 1 | 14206 | import math
import matplotlib.pyplot as plt
import numpy as np
import random
import time
from collections import defaultdict
from euclid import Circle, Point2, Vector2, LineSegment2
from ..utils import svg
from IPython.display import clear_output, display, HTML
class GameObject(object):
def __init__(self, position, speed, obj_type, settings):
"""Esentially represents circles of different kinds, which have
position and speed."""
self.settings = settings
self.radius = self.settings["object_radius"]
self.obj_type = obj_type
self.position = position
self.speed = speed
self.bounciness = 1.0
def wall_collisions(self):
"""Update speed upon collision with the wall."""
world_size = self.settings["world_size"]
for dim in range(2):
if self.position[dim] - self.radius <= 0 and self.speed[dim] < 0:
self.speed[dim] = - self.speed[dim] * self.bounciness
elif self.position[dim] + self.radius + 1 >= world_size[dim] and self.speed[dim] > 0:
self.speed[dim] = - self.speed[dim] * self.bounciness
def move(self, dt):
"""Move as if dt seconds passed"""
self.position += dt * self.speed
self.position = Point2(*self.position)
def step(self, dt):
"""Move and bounce of walls."""
self.wall_collisions()
self.move(dt)
def as_circle(self):
return Circle(self.position, float(self.radius))
def draw(self):
"""Return svg object for this item."""
color = self.settings["colors"][self.obj_type]
return svg.Circle(self.position + Point2(10, 10), self.radius, color=color)
class KarpathyGame(object):
def __init__(self, settings):
"""Initiallize game simulator with settings"""
self.settings = settings
self.size = self.settings["world_size"]
self.walls = [LineSegment2(Point2(0,0), Point2(0,self.size[1])),
LineSegment2(Point2(0,self.size[1]), Point2(self.size[0], self.size[1])),
LineSegment2(Point2(self.size[0], self.size[1]), Point2(self.size[0], 0)),
LineSegment2(Point2(self.size[0], 0), Point2(0,0))]
self.hero = GameObject(Point2(*self.settings["hero_initial_position"]),
Vector2(*self.settings["hero_initial_speed"]),
"hero",
self.settings)
if not self.settings["hero_bounces_off_walls"]:
self.hero.bounciness = 0.0
self.objects = []
for obj_type, number in settings["num_objects"].items():
for _ in range(number):
self.spawn_object(obj_type)
self.observation_lines = self.generate_observation_lines()
self.object_reward = 0
self.collected_rewards = []
# every observation_line sees one of objects or wall and
# two numbers representing speed of the object (if applicable)
self.eye_observation_size = len(self.settings["objects"]) + 3
# additionally there are two numbers representing agents own speed and position.
self.observation_size = self.eye_observation_size * len(self.observation_lines) + 2 + 2
self.directions = [Vector2(*d) for d in [[1,0], [0,1], [-1,0],[0,-1],[0.0,0.0]]]
self.num_actions = len(self.directions)
self.objects_eaten = defaultdict(lambda: 0)
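# Worked example of the sizes computed above (illustrative numbers, not taken
# from any shipped settings file): with 2 object types and 32 observation
# lines, eye_observation_size = 2 + 3 = 5 and
# observation_size = 5 * 32 + 2 + 2 = 164; there are 5 possible actions.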
def perform_action(self, action_id):
"""Change speed to one of hero vectors"""
assert 0 <= action_id < self.num_actions
self.hero.speed *= 0.5
self.hero.speed += self.directions[action_id] * self.settings["delta_v"]
def spawn_object(self, obj_type):
"""Spawn object of a given type and add it to the objects array"""
radius = self.settings["object_radius"]
position = np.random.uniform([radius, radius], np.array(self.size) - radius)
position = Point2(float(position[0]), float(position[1]))
max_speed = np.array(self.settings["maximum_speed"])
speed = np.random.uniform(-max_speed, max_speed).astype(float)
speed = Vector2(float(speed[0]), float(speed[1]))
self.objects.append(GameObject(position, speed, obj_type, self.settings))
def step(self, dt):
"""Simulate all the objects for a given ammount of time.
Also resolve collisions with the hero"""
for obj in self.objects + [self.hero] :
obj.step(dt)
self.resolve_collisions()
def squared_distance(self, p1, p2):
return (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
def resolve_collisions(self):
"""If hero touches, hero eats. Also reward gets updated."""
collision_distance = 2 * self.settings["object_radius"]
collision_distance2 = collision_distance ** 2
to_remove = []
for obj in self.objects:
if self.squared_distance(self.hero.position, obj.position) < collision_distance2:
to_remove.append(obj)
for obj in to_remove:
self.objects.remove(obj)
self.objects_eaten[obj.obj_type] += 1
self.object_reward += self.settings["object_reward"][obj.obj_type]
self.spawn_object(obj.obj_type)
def inside_walls(self, point):
"""Check if the point is inside the walls"""
EPS = 1e-4
return (EPS <= point[0] < self.size[0] - EPS and
EPS <= point[1] < self.size[1] - EPS)
def observe(self):
"""Return observation vector. For all the observation directions it returns representation
of the closest object to the hero - might be nothing, another object or a wall.
Representation of observation for all the directions will be concatenated.
"""
num_obj_types = len(self.settings["objects"]) + 1 # and wall
max_speed_x, max_speed_y = self.settings["maximum_speed"]
observable_distance = self.settings["observation_line_length"]
relevant_objects = [obj for obj in self.objects
if obj.position.distance(self.hero.position) < observable_distance]
# objects sorted from closest to furthest
relevant_objects.sort(key=lambda x: x.position.distance(self.hero.position))
observation = np.zeros(self.observation_size)
observation_offset = 0
for i, observation_line in enumerate(self.observation_lines):
# shift to hero position
observation_line = LineSegment2(self.hero.position + Vector2(*observation_line.p1),
self.hero.position + Vector2(*observation_line.p2))
observed_object = None
# if end of observation line is outside of walls, we see the wall.
if not self.inside_walls(observation_line.p2):
observed_object = "**wall**"
for obj in relevant_objects:
if observation_line.distance(obj.position) < self.settings["object_radius"]:
observed_object = obj
break
object_type_id = None
speed_x, speed_y = 0, 0
proximity = 0
if observed_object == "**wall**": # wall seen
object_type_id = num_obj_types - 1
# a wall has fairly low speed...
speed_x, speed_y = 0, 0
# best candidate is intersection between
# observation_line and a wall, that's
# closest to the hero
best_candidate = None
for wall in self.walls:
candidate = observation_line.intersect(wall)
if candidate is not None:
if (best_candidate is None or
best_candidate.distance(self.hero.position) >
candidate.distance(self.hero.position)):
best_candidate = candidate
if best_candidate is None:
# assume it is due to rounding errors
# and wall is barely touching observation line
proximity = observable_distance
else:
proximity = best_candidate.distance(self.hero.position)
elif observed_object is not None: # agent seen
object_type_id = self.settings["objects"].index(observed_object.obj_type)
speed_x, speed_y = tuple(observed_object.speed)
intersection_segment = obj.as_circle().intersect(observation_line)
assert intersection_segment is not None
try:
proximity = min(intersection_segment.p1.distance(self.hero.position),
intersection_segment.p2.distance(self.hero.position))
except AttributeError:
proximity = observable_distance
for object_type_idx_loop in range(num_obj_types):
observation[observation_offset + object_type_idx_loop] = 1.0
if object_type_id is not None:
observation[observation_offset + object_type_id] = proximity / observable_distance
observation[observation_offset + num_obj_types] = speed_x / max_speed_x
observation[observation_offset + num_obj_types + 1] = speed_y / max_speed_y
assert num_obj_types + 2 == self.eye_observation_size
observation_offset += self.eye_observation_size
observation[observation_offset] = self.hero.speed[0] / max_speed_x
observation[observation_offset + 1] = self.hero.speed[1] / max_speed_y
observation_offset += 2
# add normalized location of the hero in the environment
observation[observation_offset] = self.hero.position[0] / 350.0 - 1.0
observation[observation_offset + 1] = self.hero.position[1] / 250.0 - 1.0
assert observation_offset + 2 == self.observation_size
return observation
def distance_to_walls(self):
"""Returns distance of a hero to walls"""
res = float('inf')
for wall in self.walls:
res = min(res, self.hero.position.distance(wall))
return res - self.settings["object_radius"]
def collect_reward(self):
"""Return accumulated object eating score + current distance to walls score"""
wall_reward = self.settings["wall_distance_penalty"] * \
np.exp(-self.distance_to_walls() / self.settings["tolerable_distance_to_wall"])
assert wall_reward < 1e-3, "You are rewarding hero for being close to the wall!"
total_reward = wall_reward + self.object_reward
self.object_reward = 0
self.collected_rewards.append(total_reward)
return total_reward
def plot_reward(self, smoothing = 30):
"""Plot evolution of reward over time."""
plottable = self.collected_rewards[:]
while len(plottable) > 1000:
for i in range(0, len(plottable) - 1, 2):
plottable[i//2] = (plottable[i] + plottable[i+1]) / 2
plottable = plottable[:(len(plottable) // 2)]
x = []
for i in range(smoothing, len(plottable)):
chunk = plottable[i-smoothing:i]
x.append(sum(chunk) / len(chunk))
plt.plot(list(range(len(x))), x)
def generate_observation_lines(self):
"""Generate observation segments in settings["num_observation_lines"] directions"""
result = []
start = Point2(0.0, 0.0)
end = Point2(self.settings["observation_line_length"],
self.settings["observation_line_length"])
for angle in np.linspace(0, 2*np.pi, self.settings["num_observation_lines"], endpoint=False):
rotation = Point2(math.cos(angle), math.sin(angle))
current_start = Point2(start[0] * rotation[0], start[1] * rotation[1])
current_end = Point2(end[0] * rotation[0], end[1] * rotation[1])
result.append( LineSegment2(current_start, current_end))
return result
def _repr_html_(self):
return self.to_html()
def to_html(self, stats=[]):
"""Return svg representation of the simulator"""
stats = stats[:]
recent_reward = self.collected_rewards[-100:] + [0]
objects_eaten_str = ', '.join(["%s: %s" % (o,c) for o,c in self.objects_eaten.items()])
stats.extend([
"nearest wall = %.1f" % (self.distance_to_walls(),),
"reward = %.1f" % (sum(recent_reward)/len(recent_reward),),
"objects eaten => %s" % (objects_eaten_str,),
])
scene = svg.Scene((self.size[0] + 20, self.size[1] + 20 + 20 * len(stats)))
scene.add(svg.Rectangle((10, 10), self.size))
for line in self.observation_lines:
scene.add(svg.Line(line.p1 + self.hero.position + Point2(10,10),
line.p2 + self.hero.position + Point2(10,10)))
for obj in self.objects + [self.hero] :
scene.add(obj.draw())
offset = self.size[1] + 15
for txt in stats:
scene.add(svg.Text((10, offset + 20), txt, 15))
offset += 20
return scene
def setup_draw(self):
"""
An optional method to be triggered in simulate(...) to initialise
the figure handles for rendering.
simulate(...) will run with/without this method declared in the simulation class
As we are using SVG strings in KarpathyGame, it is not currently used.
"""
pass
def draw(self, stats=[]):
"""
An optional method to be triggered in simulate(...) to render the simulated environment.
It is repeatedly called in each simulated iteration.
simulate(...) will run with/without this method declared in the simulation class.
"""
clear_output(wait=True)
svg_html = self.to_html(stats)
display(svg_html)
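# Minimal usage sketch (illustrative, not part of the original file). The
# settings keys follow the lookups made in the class above; the concrete
# values are assumptions, not the ones shipped with tf_rl.
#
#   settings = {
#       "world_size": (700, 500),
#       "object_radius": 10.0,
#       "hero_initial_position": (350, 250),
#       "hero_initial_speed": (0, 0),
#       "hero_bounces_off_walls": False,
#       "maximum_speed": (50, 50),
#       "delta_v": 50,
#       "num_objects": {"friend": 25, "enemy": 25},
#       "objects": ["friend", "enemy"],
#       "colors": {"hero": "yellow", "friend": "green", "enemy": "red"},
#       "object_reward": {"friend": 1.0, "enemy": -1.0},
#       "wall_distance_penalty": -10.0,
#       "tolerable_distance_to_wall": 50,
#       "observation_line_length": 120,
#       "num_observation_lines": 32,
#   }
#   game = KarpathyGame(settings)
#   for _ in range(100):
#       game.perform_action(np.random.randint(game.num_actions))
#       game.step(0.1)
#       obs = game.observe()
#       reward = game.collect_reward()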
| mit |
robbymeals/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
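# Illustrative sketch (not part of scikit-learn): a custom iterator built on
# _PartitionIterator only needs to yield integer test indices; the base class
# derives the complementary training indices. "FirstKOut" is a hypothetical
# name used only for this example.
#
#   class FirstKOut(_PartitionIterator):
#       """Use the first k samples as the single test set."""
#       def __init__(self, n, k):
#           super(FirstKOut, self).__init__(n)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)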
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds), the last one has the
complementary.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
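# Illustrative note (not part of the original module): a float test_size is
# resolved with ceil and a float train_size with floor, e.g. with n=10
#   _validate_shuffle_split(10, 0.25, None)  -> (7, 3)  # n_test = ceil(2.5) = 3
#   _validate_shuffle_split(10, 2, 0.5)      -> (5, 2)  # n_train = floor(5.0) = 5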
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
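# A minimal usage sketch for cross_val_predict; the estimator and dataset below
# are illustrative choices, any classifier with fit/predict would do:
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.linear_model import LogisticRegression
# >>> iris = load_iris()
# >>> preds = cross_val_predict(LogisticRegression(), iris.data, iris.target, cv=5)
# >>> preds.shape
# (150,)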
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
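# A small illustration of the partition check; the inputs are chosen purely for
# illustration:
# >>> _check_is_partition(np.array([2, 0, 1]), 3)
# True
# >>> _check_is_partition(np.array([0, 0, 1]), 3)
# False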
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
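# A minimal usage sketch for cross_val_score; the SVC/iris pairing below is an
# illustrative assumption, not a requirement:
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import SVC
# >>> iris = load_iris()
# >>> scores = cross_val_score(SVC(), iris.data, iris.target, cv=5)
# >>> scores.shape
# (5,)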
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
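# A rough sketch of evaluating one split with _fit_and_score, reusing the
# module's check_scoring helper; the toy data, estimator and index arrays are
# illustrative assumptions:
# >>> from sklearn.svm import SVC
# >>> X, y = np.arange(20).reshape(10, 2), np.array([0, 1] * 5)
# >>> scorer = check_scoring(SVC(), scoring='accuracy')
# >>> ret = _fit_and_score(SVC(), X, y, scorer, np.arange(8), np.arange(8, 10),
# ...                      verbose=0, parameters=None, fit_params=None)
# >>> len(ret)  # [test_score, n_test_samples, scoring_time]
# 3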
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
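# A small sketch of _safe_split with a precomputed square kernel matrix; the
# kernel values and estimator below are illustrative assumptions:
# >>> from sklearn.svm import SVC
# >>> A = np.arange(12).reshape(4, 3).astype(float)
# >>> K = np.dot(A, A.T)            # 4 x 4 precomputed kernel
# >>> est = SVC(kernel='precomputed')
# >>> K_train, _ = _safe_split(est, K, None, [0, 1, 2])
# >>> K_train.shape                 # kernel restricted to the training rows/columns
# (3, 3)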
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
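# A minimal sketch of check_cv's dispatch; the toy arrays are illustrative:
# >>> X_toy = np.zeros((6, 2))
# >>> y_toy = np.array([0, 0, 0, 1, 1, 1])
# >>> isinstance(check_cv(3, X_toy, y_toy, classifier=True), StratifiedKFold)
# True
# >>> isinstance(check_cv(None, X_toy, y_toy, classifier=False), KFold)
# True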
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
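# A minimal usage sketch for permutation_test_score; the dataset and estimator
# are illustrative assumptions:
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import SVC
# >>> iris = load_iris()
# >>> score, perm_scores, pvalue = permutation_test_score(
# ...     SVC(), iris.data, iris.target, cv=3, n_permutations=30, random_state=0)
# >>> perm_scores.shape
# (30,)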
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a oneliner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
CforED/Machine-Learning | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 85 | 5728 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[[rng.randint(0, n_queries)]]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/examples/scatter_chart_examples.py | 2 | 2015 | # -*- coding: utf-8 -*-
"""
Vincent Scatter Examples
"""
#Build a Scatter Chart from scratch
from vincent import *
import pandas as pd
import pandas.io.data as web
all_data = {}
for ticker in ['AAPL', 'GOOG', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2010', '1/1/2013')
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
#Note that we're using timeseries, so x-scale type is "time". For non
#timeseries data, use "linear"
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
transform = MarkRef(data='table',
transform=[Transform(type='facet', keys=['data.col'])])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
fill=ValueRef(scale='color', field='data.col'),
size=ValueRef(value=10))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='symbol',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
data = Data.from_pandas(price[['GOOG', 'AAPL']])
#Using a Vincent Keyed List here
vis.data['table'] = data
vis.axis_titles(x='Date', y='Price')
vis.legend(title='GOOG vs AAPL')
vis.to_json('vega.json')
#Convenience method
vis = Scatter(price[['GOOG', 'AAPL']])
vis.axis_titles(x='Date', y='Price')
vis.legend(title='GOOG vs AAPL')
vis.colors(brew='RdBu')
vis.to_json('vega.json')
| gpl-2.0 |
hrjn/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 46 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "LabelKFold", "LeaveOneOut", "LeaveOneLabelOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
macks22/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem fairly matches
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
RiverWeng/oceanbase | oceanbase_0.4/tools/deploy/perf/1.py | 12 | 1857 | import datetime
import re
import sys
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
time_format = "%Y-%m-%d %H:%M:%S"
d = dict()
start_time = None
end_time = None
sql_count = 0
sql_time = 0
sql_time_dist = dict()
rpc_time = 0
urpc_time = 0
wait_time = 0
qps2time = dict()
rpc_times = []
urpc_times = []
wait_times = []
for l in sys.stdin:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] latency=\[(\d+)\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(5))
if start_time is None:
start_time = end_time
trace_id = m.group(1)
ts = m.group(5)[:-6]
d[trace_id] = dict(
sql_time = int(m.group(2)),
wait_time = int(m.group(3)),
rpc_time = int(m.group(4)),
)
sql_count += 1
sql_time += d[trace_id]['sql_time']
if sql_time_dist.has_key(d[trace_id]['sql_time']):
sql_time_dist[d[trace_id]['sql_time']] += 1
else:
            sql_time_dist[d[trace_id]['sql_time']] = 1  # count the first occurrence
wait_time += d[trace_id]['wait_time']
wait_times.append(d[trace_id]['wait_time'])
rpc_time += d[trace_id]['rpc_time']
rpc_times.append(d[trace_id]['rpc_time'])
if qps2time.has_key(ts):
qps2time[ts] += 1
else:
            qps2time[ts] = 1  # count the first occurrence
elapsed_seconds = (end_time - start_time) / 10**6
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / sql_count
avg_rpc_time = float(rpc_time) / sql_count
avg_urpc_time = float(urpc_time) / sql_count
avg_wait_time = float(wait_time) / sql_count
print "QPS: %d" % (qps)
print "AVG TIME: %f" % (avg_sql_time)
print "AVG RPC TIME: %f" % (avg_rpc_time)
print "AVG WAIT TIME: %f" % (avg_wait_time)
| gpl-2.0 |
victorbergelin/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
svohara/proximityforest | proximityforest/analysis/Evaluation.py | 2 | 6592 | '''
Created on Apr 20, 2012
@author: Stephen O'Hara
Common classes and functions relating to the analysis of experimental results,
which I use across multiple data sets. Plotting code requires matplotlib.
Copyright (C) 2012 Stephen O'Hara
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import math
import cPickle
import sys
def confusionMatrix(cnfList, class_sizes, one_based=False):
''' Computes the confusion matrix given a list of confusers
from an experimental result. Takes into account potential uneven
class distribution.
@param cnfList: A list of tuples indicating ONLY the
errors, or confusions, present in a classification result.
The tuples are ordered (predicted, actual).
@param class_sizes: A list of the size of each class. Length
of list indicates total number of classes.
@param one_based: Class labels are assumed to be contiguous zero-based
integers. However, if your labels are instead 1-based, then set
this parameter to True. In which case, labels are assumed to be
contiguous starting from 1..N, instead of 0..N-1, where N is the
number of classes.
'''
numClasses = len(class_sizes)
classes = range(1,numClasses+1) if one_based else range(numClasses)
cfMatrix = np.zeros( (numClasses,numClasses) )
for classIdx in classes:
bads = [p for (p,t) in cnfList if t == classIdx]
#record the diag entry for this row
cf_idx = (classIdx-1) if one_based else classIdx
cfMatrix[cf_idx,cf_idx] = class_sizes[cf_idx] - len(bads)
#record the off-diagonal misses
        for b in bads:
            #map the predicted label to its 0-based column (labels are 1-based when one_based=True)
            b_idx = (b-1) if one_based else b
            cfMatrix[cf_idx, b_idx] += 1
#normalize row to a percentage
for col in range(numClasses):
cfMatrix[cf_idx,col] = cfMatrix[cf_idx,col] / class_sizes[cf_idx]
tmp = (cfMatrix * 10000).astype(int)
cfMatrix = (tmp*1.0/100)
print np.round_(cfMatrix, decimals=1)
return cfMatrix
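# A minimal usage sketch with a made-up confusion list (illustrative only): two
# classes of 4 samples each, where one sample of class 0 was predicted as class 1.
# Besides returning the matrix of percentages, the call also prints it rounded.
# >>> cm = confusionMatrix([(1, 0)], [4, 4])
# >>> cm[0, 0], cm[0, 1]
# (75.0, 25.0)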
def latexMatrix(cfMx, rowlabels=None):
'''
    if you have the asciitable package installed, available through MacPorts
and other distributions, then we can output latex format tables for
our data. This is handy for generating latex output of a confusion matrix.
'''
try:
import asciitable
import asciitable.latex
except:
print "Error importing asciitable...you may not have this package installed."
return None
#build data dictionary, per-column structure
if rowlabels != None:
data = {'col000':rowlabels}
col_align = ['l']
else:
data = {}
col_align = []
numCols = cfMx.shape[1]
for i in range(numCols):
data['col%s'%(str(i+1).zfill(3))] = cfMx[:,i]
col_align.append('c')
col_align_str = "|%s|"%("|".join(col_align))
asciitable.write(data, sys.stdout, Writer=asciitable.Latex, col_align=col_align_str)
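# A small usage sketch (requires the optional asciitable package); the matrix
# below is the illustrative confusion matrix from the sketch above:
# >>> latexMatrix(np.array([[75.0, 25.0], [0.0, 100.0]]), rowlabels=['walk', 'run'])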
class ExperimentalErrorResults:
'''
This class is designed to store results of running repeated trials over a set of parameter values,
where the trials measure the error in some task, typically a classification task.
The results are stored as a numpy 2D array, where rows represent the results for a single parameter
setting and columns within a row are the results of repeated trials for the same setting.
This class provides convenience methods for analyzing the results and can be used with some
high-level matplotlib-based plotting code for visualization. Also provides convenience methods
for saving/loading results to disk.
'''
def __init__(self, ResMatrix, paramList, paramName="Parameter", desc=None, props={}):
'''
ResMatrix has rows corresponding to different forest sizes and columns corresponding
to repeated trials with the same size tree.
@param ResMatrix: The results matrix. One row per entry in paramList, one column for each
trial for each param setting. The entries in the matrix represent error rates, as a float. So 0.025 indicates
that a given trial of a given parameter setting reported a 2.5% error rate (97.5% correct).
@param paramList: A list of numerical parameter settings. Order in list corresponds to row-order of results matrix.
@param paramName: A string representation of the parameter name, used in data output/charts
@param desc: A string describing other salient information about this result object.
@param props: A dictionary with any additional key/value information you wish to store with this object
'''
self.R = ResMatrix
self.params = paramList
self.paramName = paramName
self.desc = desc
self.props = props
def __str__(self):
return "Experimental Results: %s"%self.desc
def save(self, filename):
cPickle.dump(self, open(filename,"wb"), protocol=-1)
@staticmethod
def load(filename):
'''
Static method that creates a new instance of this class from a saved pickle file
'''
return cPickle.load(open(filename,"rb"))
def setDescription(self, desc):
self.desc = desc
def compute95ci(self):
'''
Computes the 95% confidence interval around the mean values for each row
'''
numtrials = len(self.R[0,:])
x = self.getStdvs() * 1.96 / math.sqrt(numtrials)
meanErrs = np.mean(self.R,1)
low = meanErrs - x
high = meanErrs + x
return(low,high)
def getStdvs(self):
stdvs = np.std(self.R,1) #st.devs. for each row
return stdvs
def getMeanAccuracy(self):
means = np.mean(self.R,1) #mean value for each row (trials of a single forest size)
return 1 - means
def getMeanAccuracy_ci(self):
'''@return the 95% confidence interval about each mean accuracy score'''
(low,high) = self.compute95ci()
return (1-high, 1-low)
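# A minimal usage sketch; the 3 x 5 matrix of error rates below is synthetic and
# purely illustrative (3 parameter settings, 5 trials each):
# >>> R = np.random.RandomState(0).uniform(0.0, 0.2, size=(3, 5))
# >>> res = ExperimentalErrorResults(R, [10, 50, 100], paramName="Forest Size",
# ...                                desc="synthetic demo")
# >>> res.getMeanAccuracy().shape
# (3,)
# >>> low, high = res.getMeanAccuracy_ci()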
| gpl-3.0 |
harshaneelhg/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
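# A small usage sketch of the benchmark helper; the metric/format subset and the
# problem sizes below are illustrative choices, not recommendations:
# >>> times = benchmark(metrics=[METRICS['f1']],
# ...                   formats=[FORMATS['dense'], FORMATS['csr']],
# ...                   samples=200, classes=4, density=.2, n_times=2)
# >>> times.shape   # (metrics, formats, samples, classes, density)
# (1, 2, 1, 1, 1)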
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/model_selection/_validation.py | 2 | 37166 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
ALL_CVS = {'KFold': KFold,
'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeaveOneOut': LeaveOneOut,
'LeavePLabelOut': LeavePLabelOut,
'LeavePOut': LeavePOut,
'ShuffleSplit': ShuffleSplit,
'LabelShuffleSplit': LabelShuffleSplit,
'StratifiedKFold': StratifiedKFold,
'StratifiedShuffleSplit': StratifiedShuffleSplit,
'PredefinedSplit': PredefinedSplit}
LABEL_CVS = {'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeavePLabelOut': LeavePLabelOut,
'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, labels))
return np.array(scores)[:, 0]
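# Illustrative usage sketch (editor's addition, not part of the upstream module).
# Scoring a classifier with an explicit metric and fold count; X and y are
# hypothetical arrays of shape (n_samples, n_features) and (n_samples,):
#
#     from sklearn.svm import SVC
#     scores = cross_val_score(SVC(kernel='linear'), X, y, scoring='accuracy', cv=5)
#     print(scores.mean(), scores.std())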
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, labels))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
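# Illustrative usage sketch (editor's addition, not part of the upstream module).
# The `method` argument selects which estimator method supplies the
# cross-validated outputs, e.g. class probabilities instead of labels
# (X and y are hypothetical):
#
#     from sklearn.linear_model import LogisticRegression
#     proba = cross_val_predict(LogisticRegression(), X, y, cv=5,
#                               method='predict_proba')
#     # proba has shape (n_samples, n_classes), one row per held-out sample.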
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
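# Quick illustration (editor's addition): _check_is_permutation accepts any
# reordering of arange(n_samples) and rejects arrays with repeats or gaps.
#
#     _check_is_permutation(np.array([2, 0, 1]), 3)   # True
#     _check_is_permutation(np.array([0, 0, 2]), 3)   # False (index 1 missing)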
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, labels=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state),
labels, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
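# Illustrative usage sketch (editor's addition, not part of the upstream module).
# A small p-value suggests the cross-validated score is unlikely to arise under
# random label permutations (X and y are hypothetical):
#
#     from sklearn.svm import SVC
#     score, perm_scores, pvalue = permutation_test_score(
#         SVC(kernel='linear'), X, y, cv=5, n_permutations=100, n_jobs=1)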
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = cv.split(X, y, labels)
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv_iter)
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv.split(X, y, labels))
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv_iter
for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
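# Illustrative usage sketch (editor's addition, not part of the upstream module).
# Comparing training and validation scores over growing training-set sizes
# (X and y are hypothetical):
#
#     from sklearn.naive_bayes import GaussianNB
#     sizes, train_scores, test_scores = learning_curve(
#         GaussianNB(), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#     # Averaging over folds gives one curve per score type:
#     # train_scores.mean(axis=1), test_scores.mean(axis=1)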
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv.split(X, y, labels) for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
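# Illustrative usage sketch (editor's addition, not part of the upstream module).
# Sweeping a single hyper-parameter, here Ridge's alpha over a log grid
# (X and y are hypothetical):
#
#     from sklearn.linear_model import Ridge
#     train_scores, test_scores = validation_curve(
#         Ridge(), X, y, param_name='alpha',
#         param_range=np.logspace(-3, 3, 7), cv=5)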
| bsd-3-clause |
dpinney/omf | omf/models/__neoMetaModel__.py | 1 | 16543 | """ Common functions for all models """
import json, os, tempfile, webbrowser, math, shutil, datetime, multiprocessing, traceback, hashlib, re
from os.path import join as pJoin
from os.path import split as pSplit
from functools import wraps
from jinja2 import Template
import pandas as pd
import omf.models
from omf import web
# Locational variables so we don't have to rely on OMF being in the system path.
_myDir = os.path.dirname(os.path.abspath(__file__))
_omfDir = os.path.dirname(_myDir)
def metadata(fileUnderObject):
''' Get the model name and template for a given model from its filename and associated .html file.
The argument fileUnderObject should always be __file__.'''
fileName = os.path.basename(fileUnderObject)
modelName = fileName[0:fileName.rfind('.')]
with open(pJoin(_myDir, modelName+".html")) as f:
template = Template(f.read()) #HTML Template for showing output.
return modelName, template
def heavyProcessing(modelDir, test_mode=False):
''' Wrapper to handle model running safely and uniformly. '''
try:
# Start a timer.
startTime = datetime.datetime.now()
# Get the inputs.
with web.locked_open(pJoin(modelDir, 'allInputData.json')) as f:
inputDict = json.load(f)
# Place run start time.
inputDict['runStartTime'] = startTime.isoformat()
with open(pJoin(modelDir, 'allInputData.json'), 'w') as f:
json.dump(inputDict, f, indent=4)
# Remove old outputs.
try:
os.remove(pJoin(modelDir,"allOutputData.json"))
except Exception as e:
pass
# Estimate runtime if possible.
try:
inputDict['runtimeEst_min'] = getattr(omf.models, inputDict['modelType']).runtimeEstimate(modelDir)
except:
pass
# Get the function and run it.
work = getattr(omf.models, inputDict['modelType']).work
#This grabs the new outData model
outData = work(modelDir, inputDict)
#print("!!!!!!! thing !!!!!!!!") # DEBUG
except Exception as e:
# cancel(modelDir)
if test_mode == True:
raise e
# If input range wasn't valid delete output, write error to disk.
thisErr = traceback.format_exc()
print('ERROR IN MODEL', modelDir, thisErr)
inputDict['stderr'] = thisErr
with web.locked_open(os.path.join(modelDir,'stderr.txt'),'w') as errorFile:
errorFile.write(thisErr)
else:
# No errors, so update the runTime in the input file.
endTime = datetime.datetime.now()
inputDict["runTime"] = str(datetime.timedelta(seconds=int((endTime - startTime).total_seconds())))
# Write output.
modelType = inputDict["modelType"]
#Get current file hashes and dd to the output
with open(pJoin(_myDir, modelType+".html")) as f:
htmlFile = f.read()
currentHtmlHash = hashlib.sha256(htmlFile.encode('utf-8')).hexdigest()
with open(pJoin(_myDir, modelType+".py")) as f:
pythonFile = f.read()
currentPythonHash = hashlib.sha256(pythonFile.encode('utf-8')).hexdigest()
outData['htmlHash'] = currentHtmlHash
outData['pythonHash'] = currentPythonHash
outData['oldVersion'] = False
# Raw input/output file names.
outData['fileNames'] = os.listdir(modelDir)
outData['fileNames'].append('allOutputData.json')
with web.locked_open(pJoin(modelDir, "allOutputData.json"),"w") as outFile:
json.dump(outData, outFile, indent=4)
finally:
# Clean up by updating input data.
try:
with web.locked_open(pJoin(modelDir,"allInputData.json"),"w") as inFile:
json.dump(inputDict, inFile, indent=4)
except: pass
try: os.remove(pJoin(modelDir,"PPID.txt"))
except: pass
def run(modelDir):
''' Run the model in a separate process. web.py calls this to run the model.
This function will return fast, but results take a while to hit the file system.'''
backProc = multiprocessing.Process(target = heavyProcessing, args = (modelDir,))
backProc.start()
with web.locked_open(pJoin(modelDir, "PPID.txt"),"w+") as pPidFile:
pPidFile.write(str(backProc.pid))
print("SENT TO BACKGROUND", modelDir)
def runForeground(modelDir):
''' Run all model work immediately in the same thread. '''
with web.locked_open(pJoin(modelDir, "PPID.txt"),"w+") as pPidFile:
pPidFile.write('-999') # HACK: put in an invalid PID to indicate the model is running.
print("FOREGROUND RUNNING", modelDir)
heavyProcessing(modelDir)
def renderTemplate(modelDir, absolutePaths=False, datastoreNames={}):
''' Render the model template to an HTML string.
By default render a blank one for new input.
If modelDir is valid, render results post-model-run.
If absolutePaths, the HTML can be opened without a server. '''
try:
with web.locked_open(pJoin(modelDir, 'allInputData.json')) as f:
inJson = json.load(f)
modelPath, modelName = pSplit(modelDir)
deepPath, modelOwner = pSplit(modelPath)
inJson["modelName"] = modelName
inJson["user"] = modelOwner
modelType = inJson["modelType"]
template = getattr(omf.models, modelType).template
allInputData = json.dumps(inJson)
# Get hashes for model python and html files
with open(pJoin(_myDir, modelType+".html")) as f:
htmlFile = f.read()
currentHtmlHash = hashlib.sha256(htmlFile.encode('utf-8')).hexdigest()
with open(pJoin(_myDir, modelType+".py")) as f:
pythonFile = f.read()
currentPythonHash = hashlib.sha256(pythonFile.encode('utf-8')).hexdigest()
except IOError:
allInputData = None
inJson = None
try:
with web.locked_open(pJoin(modelDir,"allOutputData.json")) as f:
allOutputData = f.read()
with web.locked_open(pJoin(modelDir, "allOutputData.json")) as f:
outJson = json.load(f)
try:
#Needed? Should this be handled a different way? Add hashes to the output if they are not yet present
if ('pythonHash' not in outJson) or ('htmlHash' not in outJson):
outJson['htmlHash'] = currentHtmlHash
outJson['pythonHash'] = currentPythonHash
outJson['oldVersion'] = False
#If the hashes do not match, mark the model as an old version
elif outJson['htmlHash'] != currentHtmlHash or outJson['pythonHash'] != currentPythonHash:
outJson['oldVersion'] = True
#If the hashes match, mark the model as up to date
else:
outJson['oldVersion'] = False
except (UnboundLocalError, KeyError) as e:
			traceback.print_exc()
			print('error: ' + str(e))
except IOError:
allOutputData = None
outJson = None
if absolutePaths:
# Parent of current folder.
pathPrefix = _omfDir
else:
pathPrefix = ""
# Generate standard raw output files.
rawFilesTemplate = '''
<p class="reportTitle">Raw Input and Output Files</p>
<div id="rawOutput" class="content" style="margin-top:0px">
{% for name in allOutputDataDict['fileNames'] %}
{% if loop.index > 1 %}— {% endif %}<a href="/downloadModelData/{{allInputDataDict['user']}}/{{allInputDataDict['modelName']}}/{{name}}">{{name}}</a>
{% endfor %}
</div>
'''
rawOutputFiles = Template(rawFilesTemplate).render(allOutputDataDict=outJson, allInputDataDict=inJson)
# Generate standard model buttons.
omfModelButtonsTemplate = '''
<div class="wideInput" style="text-align:right">
{% if modelStatus != 'running' and (loggedInUser == modelOwner or loggedInUser == 'admin') %}
<button id="deleteButton" type="button" onclick="deleteModel()">Delete</button>
<button id="runButton" type="submit">Run Model</button>
{% endif %}
{% if modelStatus == "finished" %}
<button id="shareButton" type="button" onclick="shareModel()">Share</button>
<button id="duplicateButton" type="button" onclick="duplicateModel()">Duplicate</button>
{% endif %}
{% if modelStatus == "running" and (loggedInUser == modelOwner or loggedInUser == 'admin') %}
<button id="cancelButton" type="button" onclick="cancelModel()">Cancel Run</button>
{% endif %}
</div>
'''
# Generate standard status content.
loggedInUser = datastoreNames.get('currentUser', 'test')
modelStatus = getStatus(modelDir)
omfModelButtons = Template(omfModelButtonsTemplate).render(modelStatus=modelStatus, loggedInUser=loggedInUser, modelOwner=modelOwner)
now = datetime.datetime.now()
try:
mod_start = datetime.datetime.fromisoformat(inJson.get('runStartTime'))
except:
mod_start = now
elapsed_dt = now - mod_start
elapsed_min = elapsed_dt.total_seconds() / 60.0
model_estimate_min = float(inJson.get('runtimeEst_min', '2.0'))
remain_min = model_estimate_min - elapsed_min
runDebugTemplate = '''
{% if modelStatus == 'running' %}
<div id ="runIndicator" class="content">
Model has run for {{elapsed_min}} minutes. {{remain_min}} minutes estimated until completion. Results updated every 5 seconds.
</div>
{% endif %}
{% if modelStatus == 'stopped' and stderr != '' %}
<div id ="stopIndicator" class="content">
<pre id='errorText' style='overflow-x:scroll'>MODEL ENCOUNTERED AN ERROR AS FOLLOWS:\n\n{{stderr}}</pre>
</div>
{% endif %}
'''
omfRunDebugBlock = Template(runDebugTemplate).render(modelStatus=modelStatus, stderr=inJson.get('stderr', ''), elapsed_min=round(elapsed_min,2), remain_min=round(remain_min,2))
# Raw input output include.
return template.render(allInputData=allInputData, allOutputData=allOutputData, modelStatus=modelStatus, pathPrefix=pathPrefix,
datastoreNames=datastoreNames, modelName=modelType, allInputDataDict=inJson, allOutputDataDict=outJson, rawOutputFiles=rawOutputFiles, omfModelButtons=omfModelButtons, omfRunDebugBlock=omfRunDebugBlock)
def renderAndShow(modelDir, datastoreNames={}):
''' Render and open a template (blank or with output) in a local browser. '''
with tempfile.NamedTemporaryFile('w', suffix=".html", delete=False) as temp:
temp.write(renderTemplate(modelDir, absolutePaths=True))
temp.flush()
webbrowser.open("file://" + temp.name)
def renderTemplateToFile(modelDir, datastoreNames={}):
''' Render and open a template (blank or with output) in a local browser. '''
with tempfile.NamedTemporaryFile('w+', suffix=".html", delete=False) as baseTemplate:
baseTemplate.write(renderTemplate(modelDir, absolutePaths=False))
baseTemplate.flush()
baseTemplate.seek(0)
with web.locked_open(pJoin(modelDir,'inlineTemplate.html'), 'w', encoding='utf-8') as inlineTemplate:
for line in baseTemplate:
				#add backslash to regex between single and double quote
matchObj = re.match( r"(.*)/static(.+?)(['\"])(.+?)", line, re.M|re.I)
scriptTags = re.match( r"(.*)<script(.*)static/(.*)</script>", line, re.M|re.I)
styleTags = re.match( r"(.*)<link(.*)stylesheet", line, re.M|re.I)
if scriptTags:
with open(_omfDir + "/static"+ matchObj.group(2)) as f:
sourceFile = f.read()
with open(_omfDir + "/static"+ matchObj.group(2), 'r', encoding='utf-8') as yFile:
ttempfile = yFile.readlines()
tmp = '<script>'+sourceFile+'</script>'
inlineTemplate.write('<script>')
for i in ttempfile:
try:
inlineTemplate.write(i)
except (UnicodeEncodeError):
print(i)
inlineTemplate.write('</script>')
elif styleTags:
with open(_omfDir + "/static"+ matchObj.group(2), 'r', encoding='utf-8') as yFile:
ttempfile = yFile.readlines()
inlineTemplate.write('<style>')
for i in ttempfile:
try:
inlineTemplate.write(i)
except (UnicodeEncodeError):
print(i)
inlineTemplate.write('</style>')
else:
inlineTemplate.write(str(line))
def getStatus(modelDir):
''' Is the model stopped, running or finished? '''
try:
modFiles = os.listdir(modelDir)
except:
modFiles = []
hasPID = "PPID.txt" in modFiles
hasOutput = "allOutputData.json" in modFiles
if hasPID:
return 'running'
elif hasOutput:
return 'finished'
else:
return 'stopped'
def new(modelDir, defaultInputs):
''' Create a new instance of a model. Returns true on success, false on failure. '''
alreadyThere = os.path.isdir(modelDir) or os.path.isfile(modelDir)
try:
if not alreadyThere:
os.makedirs(modelDir)
else:
defaultInputs["created"] = str(datetime.datetime.now())
with web.locked_open(pJoin(modelDir, "allInputData.json"),"w") as inputFile:
json.dump(defaultInputs, inputFile, indent = 4)
return False
defaultInputs["created"] = str(datetime.datetime.now())
with web.locked_open(pJoin(modelDir, "allInputData.json"),"w") as inputFile:
json.dump(defaultInputs, inputFile, indent = 4)
return True
except:
return False
def cancel(modelDir):
''' Try to cancel a currently running model. '''
# Kill the GridLAB-D process if it has already been created. If GridLAB-D hasn't created a PID.txt file, or GridLAB-D never ran, keep going
try:
with web.locked_open(pJoin(modelDir,"PID.txt"),"r") as pidFile:
pid = int(pidFile.read())
# print "pid " + str(pid)
os.kill(pid, 15)
print("PID KILLED")
except:
pass
# Kill the runForeground process if it has already been created. If __neoMetaModel__.py hasn't created a PPID.txt file yet, keep going
try:
with web.locked_open(pJoin(modelDir, "PPID.txt"), "r") as pPidFile:
pPid = int(pPidFile.read())
os.kill(pPid, 15)
print("PPID KILLED")
except:
pass
# Remove PID, PPID, and allOutputData file if existed
for fName in ['PID.txt', 'PPID.txt', 'allOutputData.json']:
try:
os.remove(pJoin(modelDir,fName))
except:
pass
print("CANCELED", modelDir)
def roundSig(x, sig=3):
''' Round to a given number of sig figs. '''
roundPosSig = lambda y,sig: round(y, sig-int(math.floor(math.log10(y)))-1)
if x == 0: return 0
elif x!=x: return 0 # This is handling float's NaN.
elif x < 0: return -1*roundPosSig(-1*x, sig)
else: return roundPosSig(x, sig)
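# Quick illustration (editor's addition): roundSig keeps the requested number
# of significant figures and is safe for zero, negative and NaN inputs.
#
#     roundSig(123456, 3)     # -> 123000
#     roundSig(0.0012345, 2)  # -> 0.0012
#     roundSig(-0.04567)      # -> -0.0457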
def safe_assert(bool_statement, error_str, keep_running):
if keep_running:
if not bool_statement:
print(error_str)
else:
assert bool_statement, error_str
def csvValidateAndLoad(file_input, modelDir, header=0, nrows=8760, ncols=1, dtypes=[], return_type='list_by_col', ignore_nans=False, save_file=None, ignore_errors=False):
"""
Safely validates, loads, and saves user's file input for model's use.
Parameters:
file_input: stream from input_dict to be read
modelDir: a temporary or permanent file saved to given location
header: row of header, enter "None" if no header provided.
nrows: skip confirmation if None
ncols: skip confirmation if None
dtypes: dtypes as columns should be parsed. If empty, no parsing.
Use "False" for column index where there should be no parsing.
This can be used as any mapping function.
return_type: options: 'dict', 'df', 'list_by_col', 'list_by_row'
ignore_nans: Ignore NaN values
save_file: if not None, save file with given *relative* pathname. It will be appended to modelDir
ignore_errors (bool): if True, allow program to keep running when errors found and print
Returns:
Datatype as dictated by input parameters
"""
# save temporary file
temp_path = os.path.join(modelDir, 'csv_temp.csv') if save_file == None else os.path.join(modelDir, save_file)
with open(temp_path, 'w') as f:
f.write(file_input)
df = pd.read_csv(temp_path, header=header)
if nrows != None:
safe_assert( df.shape[0] == nrows, (
f'Incorrect CSV size. Required: {nrows} rows. Given: {df.shape[0]} rows.'
), ignore_errors)
if ncols != None:
safe_assert( df.shape[1] == ncols, (
f'Incorrect CSV size. Required: {ncols} columns. Given: {df.shape[1]} columns.'
), ignore_errors)
# NaNs
if not ignore_nans:
d = df.isna().any().to_dict()
nan_columns = [k for k, v in d.items() if v]
safe_assert(
len(nan_columns) == 0,
f'NaNs detected in columns {nan_columns}. Please adjust your CSV accordingly.',
ignore_errors
)
# parse datatypes
safe_assert(
(len(dtypes) == 0) or (len(dtypes) == ncols),
f"Length of dtypes parser must match ncols, you've entered {len(dtypes)}. If no parsing, provide empty array.",
ignore_errors
)
for t, x in zip(dtypes, df.columns):
if t != False:
df[x] = df[x].map(lambda x: t(x))
# delete file if requested
if save_file == None:
os.remove(temp_path)
# return proper type
OPTIONS = ['dict', 'df', 'list_by_col', 'list_by_row']
safe_assert(
return_type in OPTIONS,
f'return_type not recognized. Options are {OPTIONS}.',
ignore_errors
)
if return_type == 'list_by_col':
return [df[x].tolist() for x in df.columns]
elif return_type == 'list_by_row':
return df.values.tolist()
elif return_type == 'df':
return df
elif return_type == 'dict':
return [{k: v for k, v in row.items()} for _, row in df.iterrows()]
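# Illustrative usage sketch (editor's addition, not part of the upstream module).
# Loading an 8760-row, single-column hourly CSV uploaded through a model's
# input form; the input key and file name below are hypothetical:
#
#     (demand,) = csvValidateAndLoad(inputDict['demandCurve'], modelDir,
#         header=0, nrows=8760, ncols=1, dtypes=[float],
#         return_type='list_by_col', save_file='demand.csv')
#     # `demand` is a plain Python list of 8760 floats.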
def neoMetaModel_test_setup(function):
@wraps(function)
def test_setup_wrapper(*args, **kwargs):
heavyProcessing.__defaults__ = (True,)
return function()
return test_setup_wrapper
def _test():
""" No test required for this file. """
pass | gpl-2.0 |
dsm054/pandas | pandas/errors/__init__.py | 1 | 5567 | # flake8: noqa
"""
Expose public exceptions & warnings
"""
from pandas._libs.tslibs import OutOfBoundsDatetime
class PerformanceWarning(Warning):
"""
Warning raised when there is a possible
performance impact.
"""
class UnsupportedFunctionCall(ValueError):
"""
Exception raised when attempting to call a numpy function
on a pandas object, but that function is not supported by
the object e.g. ``np.cumsum(groupby_object)``.
"""
class UnsortedIndexError(KeyError):
"""
Error raised when attempting to get a slice of a MultiIndex,
and the index has not been lexsorted. Subclass of `KeyError`.
.. versionadded:: 0.20.0
"""
class ParserError(ValueError):
"""
Exception that is raised by an error encountered in `pd.read_csv`.
"""
class DtypeWarning(Warning):
"""
Warning raised when reading different dtypes in a column from a file.
Raised for a dtype incompatibility. This can happen whenever `read_csv`
or `read_table` encounter non-uniform dtypes in a column(s) of a given
CSV file.
See Also
--------
pandas.read_csv : Read CSV (comma-separated) file into a DataFrame.
pandas.read_table : Read general delimited file into a DataFrame.
Notes
-----
This warning is issued when dealing with larger files because the dtype
checking happens per chunk read.
Despite the warning, the CSV file is read with mixed types in a single
column which will be an object type. See the examples below to better
understand this issue.
Examples
--------
This example creates and reads a large CSV file with a column that contains
`int` and `str`.
>>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +
... ['1'] * 100000),
... 'b': ['b'] * 300000})
>>> df.to_csv('test.csv', index=False)
>>> df2 = pd.read_csv('test.csv')
... # DtypeWarning: Columns (0) have mixed types
Important to notice that ``df2`` will contain both `str` and `int` for the
same input, '1'.
>>> df2.iloc[262140, 0]
'1'
>>> type(df2.iloc[262140, 0])
<class 'str'>
>>> df2.iloc[262150, 0]
1
>>> type(df2.iloc[262150, 0])
<class 'int'>
One way to solve this issue is using the `dtype` parameter in the
`read_csv` and `read_table` functions to explicit the conversion:
>>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str})
No warning was issued.
>>> import os
>>> os.remove('test.csv')
"""
class EmptyDataError(ValueError):
"""
Exception that is thrown in `pd.read_csv` (by both the C and
Python engines) when empty data or header is encountered.
"""
class ParserWarning(Warning):
"""
Warning raised when reading a file that doesn't use the default 'c' parser.
Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change
parsers, generally from the default 'c' parser to 'python'.
It happens due to a lack of support or functionality for parsing a
particular attribute of a CSV file with the requested engine.
Currently, 'c' unsupported options include the following parameters:
1. `sep` other than a single character (e.g. regex separators)
2. `skipfooter` higher than 0
3. `sep=None` with `delim_whitespace=False`
The warning can be avoided by adding `engine='python'` as a parameter in
`pd.read_csv` and `pd.read_table` methods.
See Also
--------
pd.read_csv : Read CSV (comma-separated) file into DataFrame.
pd.read_table : Read general delimited file into DataFrame.
Examples
--------
Using a `sep` in `pd.read_csv` other than a single character:
>>> import io
>>> csv = u'''a;b;c
... 1;1,8
... 1;2,1'''
>>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') # doctest: +SKIP
... # ParserWarning: Falling back to the 'python' engine...
Adding `engine='python'` to `pd.read_csv` removes the Warning:
>>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
"""
class MergeError(ValueError):
"""
Error raised when problems arise during merging due to problems
with input data. Subclass of `ValueError`.
"""
class NullFrequencyError(ValueError):
"""
Error raised when a null `freq` attribute is used in an operation
that needs a non-null frequency, particularly `DatetimeIndex.shift`,
`TimedeltaIndex.shift`, `PeriodIndex.shift`.
"""
class AccessorRegistrationWarning(Warning):
"""Warning for attribute conflicts in accessor registration."""
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance, methodtype='method'):
types = {'method', 'classmethod', 'staticmethod', 'property'}
if methodtype not in types:
msg = 'methodtype must be one of {}, got {} instead.'.format(
methodtype, types)
raise ValueError(msg)
self.methodtype = methodtype
self.class_instance = class_instance
def __str__(self):
if self.methodtype == 'classmethod':
name = self.class_instance.__name__
else:
name = self.class_instance.__class__.__name__
msg = "This {methodtype} must be defined in the concrete class {name}"
return (msg.format(methodtype=self.methodtype, name=name))
| bsd-3-clause |
DR08/mxnet | python/mxnet/model.py | 4 | 39905 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
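# --- Hedged usage sketch (not part of the original file) ------------------
# Example of how the helper above is typically called from `fit`: with a
# single device and the default 'local' type no KVStore is needed, so the
# returned store is None and updates are applied locally. `params` is
# assumed to be a dict of parameter name -> NDArray built elsewhere.
def _example_create_kvstore(params):
    kv, update_on_kvstore = _create_kvstore('local', 1, params)
    # kv is None here; with num_device > 1 a real 'local' store is created
    # and update_on_kvstore then depends on the largest parameter size.
    return kv, update_on_kvstore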
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
            # use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
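# --- Hedged usage sketch (not part of the original file) ------------------
# Example of the dispatcher above: it silently accepts None, a single
# callable, or a list of callables, which is why `fit` can take either form
# for its *_callback arguments.
def _example_multiple_callbacks():
    seen = []
    _multiple_callbacks(None, 'ignored')                    # no-op
    _multiple_callbacks(seen.append, 'one')                 # single callable
    _multiple_callbacks([seen.append, seen.append], 'two')  # list of callables
    return seen  # ['one', 'two', 'two']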
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
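# --- Hedged usage sketch (not part of the original file) ------------------
# Example of the save/load pair above. The prefix 'mymodel' is arbitrary and
# `net`, `arg_params`, `aux_params` are assumed to come from a trained model
# elsewhere; epoch 1 produces 'mymodel-symbol.json' and 'mymodel-0001.params'
# on disk.
def _example_checkpoint_roundtrip(net, arg_params, aux_params, prefix='mymodel'):
    save_checkpoint(prefix, 1, net, arg_params, aux_params)
    # Returns (Symbol, dict of arg NDArrays, dict of aux NDArrays).
    return load_checkpoint(prefix, 1)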
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric
The metric for calculating score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            By default uses 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optmizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
            if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - Symbol will be loaded from ``prefix-symbol.json``.
        - Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
            Defaults to 'local', often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
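# --- Hedged usage sketch (not part of the original file) ------------------
# Minimal illustration of the (deprecated) FeedForward API defined above;
# `net` is assumed to be a Symbol ending in an output layer and
# `train_iter` / `val_iter` DataIter objects prepared elsewhere. The extra
# keyword `learning_rate` is forwarded to the 'sgd' optimizer.
def _example_feedforward(net, train_iter, val_iter):
    model = FeedForward(net, ctx=cpu(), num_epoch=2, optimizer='sgd',
                        initializer=Uniform(0.01), learning_rate=0.1)
    model.fit(X=train_iter, eval_data=val_iter, eval_metric='acc')
    return model.predict(val_iter)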
| apache-2.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/container.py | 4 | 3235 | import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method(self)
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
return d
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def get_children(self):
return list(cbook.flatten(self))
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
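# --- Hedged usage sketch (not part of the original file) ------------------
# Illustration of how these containers usually appear in practice: Axes.bar
# returns a BarContainer whose .patches are the bar Rectangles, and removing
# the container removes every child artist at once. `ax` is assumed to be a
# matplotlib Axes created elsewhere.
def _example_bar_container(ax):
    bars = ax.bar([0, 1, 2], [3, 1, 2], label='demo')
    heights = [p.get_height() for p in bars.patches]  # per-bar artists
    bars.remove()  # detaches all patches from the axes in one call
    return heights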
| mit |
gclenaghan/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 22 | 13165 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
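# --- Hedged usage note (not part of the original file) --------------------
# With n_topics = 3 the fixture above is a 9x9 block-diagonal document-term
# matrix, so documents 0-2 only use words 0-2, documents 3-5 only words 3-5,
# and so on. That is why the tests below expect each learned component to
# concentrate its top-3 weights on one of the word groups
# (0, 1, 2), (3, 4, 5), (6, 7, 8).
def _example_fixture_shape():
    n_topics, X = _build_sparse_mtx()
    return n_topics, X.shape  # -> (3, (9, 9))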
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
    # (same as test_lda_fit_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py | 69 | 24593 | # Todd Miller [email protected]
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
    Show all the figures and enter the Tk mainloop.
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
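# --- Hedged usage sketch (not part of the original file) ------------------
# Example of embedding the Agg-backed canvas defined below in an existing Tk
# application, instead of letting new_figure_manager create the window;
# everything here is local to the function and nothing runs at import time.
def _example_embed_canvas():
    root = Tk.Tk()
    fig = Figure(figsize=(4, 3), dpi=100)
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    canvas.show()  # render with Agg, then blit into the Tk photo image
    return root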
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas instance
    window - the Tk window
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas instance
    window - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
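# --- Hedged usage sketch (added; not part of the original backend module) ---
# A minimal embedding example, assuming this module's FigureCanvasTkAgg plus the
# Tk and Figure names imported earlier in the file; shown only to illustrate how
# the toolbar above is wired to a canvas.
if __name__ == '__main__':
    root = Tk.Tk()
    fig = Figure(figsize=(5, 4), dpi=100)
    canvas = FigureCanvasTkAgg(fig, master=root)      # render the figure in Tk
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    toolbar = NavigationToolbar2TkAgg(canvas, root)   # pan/zoom/save buttons
    toolbar.update()
    Tk.mainloop()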
| gpl-3.0 |
costypetrisor/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
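# Hedged usage note (added): this benchmark is driven from the command line,
# e.g. ``python bench_20newsgroups.py -e logistic_regression naive_bayes``;
# the estimator names must match the keys of ESTIMATORS above.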
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
mcdeaton13/dynamic | Data/Calibration/Firm Calibration Python/data/soi/processing/pull_soi_corp.py | 2 | 10435 | """
SOI Corporate Tax Data (pull_soi_corp.py):
-------------------------------------------------------------------------------
Last updated: 6/26/2015.
This module creates functions for pulling the corporate soi tax data into
NAICS trees. The data is categorized into C, S, and their aggregate.
Note that only the S and aggregate corporation data are explicitly given.
The C-corporation data is inferred from the two.
"""
# Packages:
import os.path
import numpy as np
import pandas as pd
# Directory names:
_CUR_DIR = os.path.dirname(__file__)
_OUT_DIR = os.path.join(os.path.dirname(_CUR_DIR), "output")
_DATA_DIR = os.path.join(os.path.dirname(_CUR_DIR), "data")
_CORP_DIR = os.path.join(_DATA_DIR, "soi_corporate")
# Importing custom modules:
import naics_processing as naics
import file_processing as fp
import constants as cst
# Dataframe names:
_TOT_DF_NM = cst.TOT_CORP_DF_NM
_S_DF_NM = cst.S_CORP_DF_NM
_C_DF_NM = cst.C_CORP_DF_NM
# (Optional) Hardcode the year that the partner data is from:
_YR = ""
_YR = str(_YR)
# Filenames:
_TOT_CORP_IN_FILE = fp.get_file(dirct=_CORP_DIR, contains=[_YR+"sb1.csv"])
_S_CORP_IN_FILE = fp.get_file(dirct=_CORP_DIR, contains=[_YR+"sb3.csv"])
# Full path for files:
_TOT_CORP_IN_PATH = os.path.join(_CORP_DIR, _TOT_CORP_IN_FILE)
_S_CORP_IN_PATH = os.path.join(_CORP_DIR, _S_CORP_IN_FILE)
_TOT_CORP_OUT_PATH = os.path.join(_OUT_DIR, _TOT_DF_NM+".csv")
_S_CORP_OUT_PATH = os.path.join(_OUT_DIR, _S_DF_NM+".csv")
_C_CORP_OUT_PATH = os.path.join(_OUT_DIR, _C_DF_NM+".csv")
# Constant factors:
_TOT_CORP_IN_FILE_FCTR = 10**3
_S_CORP_IN_FILE_FCTR = 10**3
# Input--default dictionaries for df-columns to input-columns.
_DFLT_TOT_CORP_COLS_DICT = cst.DFLT_TOT_CORP_COLS_DICT
_DFLT_S_CORP_COLS_DICT = cst.DFLT_S_CORP_COLS_DICT
# Input--NAICS column:
_NAICS_COL_NM = "INDY_CD"
def load_soi_tot_corp(data_tree=naics.generate_tree(),
cols_dict=_DFLT_TOT_CORP_COLS_DICT,
blueprint=None, blue_tree=None,
from_out=False, output_path=_TOT_CORP_OUT_PATH):
""" This function pulls the soi total corporation data.
:param data_tree: The NAICS tree to read the data into.
:param cols_dict: A dictionary mapping dataframe columns to the name of
the column names in the input file
:param blueprint: The key corresponding to a dataframe in a tree to be
used as a "blueprint" for populating the df_list dataframes forward.
:param blue_tree: A NAICS tree with the "blueprint" dataframe. The default
is the original NAICS tree.
:param from_out: Whether to read in the data from output.
:param output_path: The path of the output file.
"""
# If from_out, load the data tree from output:
if from_out:
data_tree = naics.load_tree_dfs(input_path=output_path, tree=data_tree)
return data_tree
# Pertinent information:
num_inds = len(data_tree.enum_inds) # Number of industries in NAICS tree.
data_cols = cols_dict.keys() # Dataframe column names.
# Opening the soi total corporate data file:
try:
tot_corp_data = pd.read_csv(_TOT_CORP_IN_PATH).fillna(0)
except IOError:
print "IOError: Tot-Corp soi data file not found."
return None
# Initializing dataframes for all NAICS industries:
data_tree.append_all(df_nm=_TOT_DF_NM, df_cols=data_cols)
# Reading the total corporation data into the NAICS tree:
enum_index = 0
for code_num in np.unique(tot_corp_data[_NAICS_COL_NM]):
# Find the industry with a code that matches "code_num":
ind_found = False
for i in range(0, num_inds):
enum_index = (enum_index + 1) % num_inds
cur_ind = data_tree.enum_inds[enum_index]
cur_dfs = cur_ind.data.dfs[cst.CODE_DF_NM]
for j in range(0, cur_dfs.shape[0]):
if(cur_dfs.iloc[j,0] == code_num):
# Industry with the matching code has been found:
ind_found = True
cur_dfs = cur_ind.data.dfs[_TOT_DF_NM]
break
# If the matching industry has been found stop searching for it:
if ind_found:
break
# If no match was found, then ignore data.
if not ind_found:
continue
# Indicators for if rows in tot_corp_data match current industry code:
indicators = (tot_corp_data[_NAICS_COL_NM] == code_num)
# Calculating the data:
for j in cols_dict:
# Some of the data may not be reported:
if cols_dict[j] == "":
cur_dfs[j] = 0
else:
# Note: the source file double counts these values, hence the division by two.
cur_dfs[j][0] = sum(indicators * tot_corp_data[cols_dict[j]])/2.0
cur_dfs[j][0] = cur_dfs[j][0] * _TOT_CORP_IN_FILE_FCTR
# Populate all levels of specificity in the NAICS tree:
naics.pop_back(tree=data_tree, df_list=[_TOT_DF_NM])
naics.pop_forward(tree=data_tree, df_list=[_TOT_DF_NM],
blueprint=blueprint, blue_tree=blue_tree)
return data_tree
def load_soi_s_corp(data_tree=naics.generate_tree(),
cols_dict=_DFLT_S_CORP_COLS_DICT,
blue_tree=None, blueprint=None,
from_out=False, out_path=_S_CORP_OUT_PATH):
""" This function pulls the soi s-corporation data.
:param data_tree: The tree to read the data into.
:param cols_dict: A dictionary mapping dataframe columns to the name of
the column names in the input file
:param blueprint: The key corresponding to a dataframe in a tree to be
used as a "blueprint" for populating the df_list dataframes forward.
:param blue_tree: A NAICS tree with the "blueprint" dataframe. The default
is the original NAICS tree.
:param from_out: Whether to read in the data from output.
:param out_path: The path of the output file.
"""
# If from_out, load the data tree from output:
if from_out:
data_tree = naics.load_tree_dfs(input_path=out_path, tree=data_tree)
return data_tree
# Pertinent information:
num_inds = len(data_tree.enum_inds) # Number of industries in NAICS tree.
data_cols = cols_dict.keys() # Dataframe column names.
# Opening the soi S-corporate data file:
try:
s_corp_data = pd.read_csv(_S_CORP_IN_PATH).fillna(0)
except IOError:
print "IOError: S-Corp soi data file not found."
return None
# Initializing dataframes for all NAICS industries:
data_tree.append_all(df_nm=_S_DF_NM, df_cols=data_cols)
# Reading the S-corporation data into the NAICS tree:
enum_index = 0
for code_num in np.unique(s_corp_data[_NAICS_COL_NM]):
# Find the industry with a code that matches "code_num":
ind_found = False
for i in range(0, len(data_tree.enum_inds)):
enum_index = (enum_index + 1) % num_inds
cur_ind = data_tree.enum_inds[i]
cur_dfs = cur_ind.data.dfs[cst.CODE_DF_NM]
for j in range(0, cur_dfs.shape[0]):
if(cur_dfs.iloc[j,0] == code_num):
# Industry with the matching code has been found:
ind_found = True
cur_dfs = cur_ind.data.dfs[cst.S_CORP_DF_NM]
break
# If the matching industry has been found stop searching for it.
if ind_found:
break
# If no match was found, then ignore data.
if not ind_found:
continue
# Indicators for if rows in s_corp_data match current industry code:
indicators = (s_corp_data[_NAICS_COL_NM] == code_num)
# Calculating the data:
for j in cols_dict:
# Some are not reported for S Corporations:
if cols_dict[j] == "":
cur_dfs[j] = 0
else:
cur_dfs.loc[0,j] = sum(indicators * s_corp_data[cols_dict[j]])/2.0
cur_dfs.loc[0,j] = cur_dfs.loc[0,j] * _S_CORP_IN_FILE_FCTR
# Default blueprint is tot_corps:
has_tot_df = _TOT_DF_NM in data_tree.enum_inds[0].data.dfs.keys()
if blueprint == None and has_tot_df:
blueprint = _TOT_DF_NM
# Populate all levels of specificity in the NAICS tree:
naics.pop_back(tree=data_tree, df_list=[_S_DF_NM])
naics.pop_forward(tree=data_tree, df_list=[_S_DF_NM],
blueprint=blueprint, blue_tree=blue_tree)
return data_tree
def calc_c_corp(data_tree=naics.generate_tree(), from_out=False,
out_path=_C_CORP_OUT_PATH):
""" This function calculates the soi c-corporation data based of the
s and the aggregate corporation data.
:param data_tree: The tree to read the data into.
:param from_out: Whether to read in the data from output.
:param out_path: The path of the output file.
"""
# If from_out, load the data tree from output:
if from_out:
data_tree = naics.load_tree_dfs(input_path=out_path, tree=data_tree)
return data_tree
''' For each industry, subtract the s-corporation data from the total to
get the c-corporation data.'''
for ind in data_tree.enum_inds:
try:
# Industry's total-corporation data:
cur_tot = ind.data.dfs[_TOT_DF_NM]
except KeyError:
print "Total-Corp data not initialized when interpolating C-Corp."
try:
# Industry's S-corporation data:
cur_s = ind.data.dfs[_S_DF_NM]
except KeyError:
print "S-Corp data not initialized when interpolating C-Corp."
data_cols = cur_tot.columns.values.tolist()
# Append C-corporation dataframe:
ind.append_dfs((_C_DF_NM, pd.DataFrame(np.zeros((1,len(data_cols))),
columns = data_cols)))
# C-corporation data:
ind.data.dfs[_C_DF_NM] = cur_tot - cur_s
return data_tree
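# Hedged usage sketch (added; not in the original module): the intended call
# order is to load the aggregate data, then the S-corporation data, and finally
# derive the C-corporation data as their difference, e.g.
#   tree = load_soi_tot_corp()
#   tree = load_soi_s_corp(data_tree=tree)
#   tree = calc_c_corp(data_tree=tree)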
| mit |
adykstra/mne-python | examples/stats/plot_fdr_stats_evoked.py | 24 | 2752 | """
=======================================
FDR correction on T-test on sensor data
=======================================
This example tests whether the evoked response deviates significantly from 0.
The multiple comparisons problem is addressed with
False Discovery Rate (FDR) correction.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
threshold_fdr = np.min(np.abs(T)[reject_fdr])
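# Added note: with this data the three thresholds are typically ordered
# threshold_uncorrected <= threshold_fdr <= threshold_bonferroni, i.e. FDR is
# less conservative than Bonferroni but stricter than no correction at all.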
###############################################################################
# Plot
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
| bsd-3-clause |
jfinkels/networkx | examples/drawing/atlas.py | 4 | 2761 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Graphs that appear isomorphic (by a heuristic check) are removed.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# list of graphs of all connected components
C = nx.connected_component_subgraphs(U)
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
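# Added note: graph_could_be_isomorphic compares graph invariants (such as the
# degree and triangle sequences) rather than performing a full isomorphism
# test, so the filter above is a heuristic and may drop graphs that are not
# truly isomorphic to a graph already kept.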
if __name__ == '__main__':
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = nx.connected_component_subgraphs(G)
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png", dpi=75)
| bsd-3-clause |
appapantula/deeppy | setup.py | 16 | 2509 | #!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open('requirements.txt') as f:
install_requires = [l.strip() for l in f]
version = None
regex = re.compile(r'''^__version__ = ['"]([^'"]*)['"]''')
with open(os.path.join('deeppy', '__init__.py')) as f:
for line in f:
mo = regex.search(line)
if mo is not None:
version = mo.group(1)
break
if version is None:
raise RuntimeError('Could not find version number')
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import subprocess
subprocess.call(['py.test'] + self.pytest_args + ['test'])
class Coverage(Command):
description = 'Generate a test coverage report.'
user_options = [('report=', 'r', 'Report type (report/html)')]
def initialize_options(self):
self.report = 'report'
def finalize_options(self):
pass
def run(self):
import subprocess
subprocess.call(['coverage', 'run', '--source=deeppy', '-m', 'py.test',
'test'])
subprocess.call(['coverage', self.report])
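# Hedged usage note (added): with this setup.py the custom commands are invoked
# through setuptools, e.g. ``python setup.py test`` or
# ``python setup.py coverage --report=html``, assuming pytest and coverage are
# installed (see extras_require below).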
setup(
name='deeppy',
version=version,
author='Anders Boesen Lindbo Larsen',
author_email='[email protected]',
description='Deep learning in Python',
license='MIT',
url='http://compute.dtu.dk/~abll',
packages=find_packages(exclude=['doc', 'examples', 'test']),
install_requires=install_requires,
long_description=read('README.md'),
cmdclass={
'test': PyTest,
'coverage': Coverage,
},
extras_require={
'test': ['pytest', 'sklearn'],
'coverage': ['pytest', 'sklearn', 'coverage'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
| mit |
adamgreenhall/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
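# Hedged usage note (added): run the benchmark from the command line, e.g.
#   python bench_covertype.py --classifiers liblinear SGD CART --n-jobs 2
# the classifier names must match the ESTIMATORS keys defined above.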
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
kjung/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 14 | 8194 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_labels = len(le.classes_)
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
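# Hedged usage sketch (added; not part of the scikit-learn source): a typical
# call pairs the score with a clustering model, e.g.
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#   score = silhouette_score(X, labels)
# where X is an (n_samples, n_features) array.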
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
le = LabelEncoder()
labels = le.fit_transform(labels)
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.ones(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf * intra_clust_dists
for curr_label in unique_labels:
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = np.sum(mask) - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in unique_labels:
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
return sil_samples
| bsd-3-clause |
matbra/bokeh | bokeh/server/tests/config/test_blaze_config.py | 29 | 1202 | from __future__ import absolute_import
import numpy as np
import pandas as pd
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+1]),
'A2': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+2]),
'A3': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+3]),
'A4': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+4]),
'A5': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+5]),
'B': np.random.randn(qty),
'C': np.hstack([np.zeros(qty/2), np.ones(qty/2)])}
bivariate = pd.DataFrame(bivariate)
data_dict = dict(uniform=uniform,
gauss=gauss,
bivariate=bivariate)
| bsd-3-clause |
fredhusser/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
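# Added note: X now has 24 columns -- the 4 informative iris features followed
# by 20 uninformative noise columns.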
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
rcharp/toyota-flask | numpy/numpy/core/code_generators/ufunc_docstrings.py | 11 | 89690 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
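# Hedged sketch (added): at build time generate_umath.py is expected to look
# entries up through get(), e.g.
#   doc = get('numpy.core.umath.add')
# and embed the returned string in the generated C source for that ufunc.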
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
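The infinite special cases listed in the table above can also be checked
directly (an added illustration; printed precision assumes the default
float64 formatting):
>>> np.arctan2([np.inf, -np.inf], [np.inf, -np.inf])
array([ 0.78539816, -2.35619449])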
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
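The polar decomposition described in the Notes can also be checked
numerically (an added illustration):
>>> np.allclose(np.exp(1 + 2j), np.exp(1) * (np.cos(2) + 1j * np.sin(2)))
True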
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
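For the "floor-towards-zero" behaviour mentioned in the Notes, use
`trunc` instead (added for comparison):
>>> np.floor(-2.5)
-3.0
>>> np.trunc(-2.5)
-2.0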
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the
inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
that positive infinity is not equivalent to negative infinity, and that
plain infinity denotes positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if the
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
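For inputs whose exponentials do not underflow, the result agrees with
the naive formulation (an added sanity check):
>>> np.allclose(prob12, np.log(np.exp(prob1) + np.exp(prob2)))
True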
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
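The `np.where` equivalence stated in the Notes can be checked directly
for NaN-free inputs (an added illustration):
>>> x1, x2 = np.array([2, 3, 4]), np.array([1, 5, 2])
>>> np.all(np.maximum(x1, x2) == np.where(x1 >= x2, x1, x2))
True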
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
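As stated in the Notes, integer input yields float return values
(an added illustration):
>>> np.modf(3)
(0.0, 3.0)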
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be `numpy.ones_like`; a dedicated function of that
name has since been written for consistency with the other ``*_like``
functions. This one is now only used internally, in a limited fashion.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
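The integer behaviour described in the note above can be seen directly
(an added illustration; integer arguments are generally best avoided):
>>> np.reciprocal(2)
0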
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``; the result has the same sign as
the divisor `x2`. It is equivalent to the Python modulus operator
``x1 % x2`` and should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a
scalar if both `x1` and `x2` are scalars.
See Also
--------
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
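Unlike `fmod`, the sign of the result follows the divisor
(an added illustration):
>>> np.remainder([-3, 3], [2, -2])
array([ 1, -1])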
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
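Non-finite inputs give ``nan``, as stated in the Notes
(an added illustration):
>>> np.spacing(np.inf)
nan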
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
# This doc is not currently used, but has been converted to a C string
# that can be found in numpy/core/src/umath/umathmodule.c where the
# frexp ufunc is constructed.
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
    exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1: ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2: ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
# This doc is not currently used, but has been converted to a C string
# that can be found in numpy/core/src/umath/umathmodule.c where the
# ldexp ufunc is constructed.
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
    `ldexp` is useful as the inverse of `frexp`; if used by itself, it is
    clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| apache-2.0 |
mrshu/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 6 | 2901 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from sklearn.utils.testing import assert_equal
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
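def test_dbscan_no_core_samples_sketch():
    """Illustrative sketch, not part of the original test suite: when
    min_samples exceeds the dataset size no point can become a core point,
    so DBSCAN labels every sample as noise (-1)."""
    db = DBSCAN(eps=0.3, min_samples=X.shape[0] + 1)
    labels = db.fit(X).labels_
    # every label should be -1 because no core points can exist
    assert_equal(set(labels), set([-1]))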
| bsd-3-clause |
bbfamily/abu | python/c6.py | 1 | 28184 | # -*- encoding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# noinspection PyUnresolvedReferences
import abu_local_env
import abupy
from abupy import ABuSymbolPd
from abupy import six, xrange
from abc import ABCMeta, abstractmethod
warnings.filterwarnings('ignore')
sns.set_context(rc={'figure.figsize': (14, 7)})
# 使用沙盒数据,目的是和书中一样的数据环境
abupy.env.enable_example_env_ipython()
tsla_close = ABuSymbolPd.make_kl_df('usTSLA').close
# x序列: 0,1,2, ...len(tsla_close)
x = np.arange(0, tsla_close.shape[0])
# 收盘价格序列
y = tsla_close.values
"""
Chapter 6  Quantitative tools - mathematics: how much happiness can the pursuits of your life bring
abu quantitative system on github: https://github.com/bbfamily/abu (your star is my motivation!)
abu quantitative tutorial ipython notebooks: https://github.com/bbfamily/abu/tree/master/abupy_lecture
"""
def sample_611_1(show=True):
"""
    6.1.1 Linear regression
:return:
"""
import statsmodels.api as sm
from statsmodels import regression
def regress_y(_y):
_y = _y
# x序列: 0,1,2, ...len(y)
_x = np.arange(0, len(_y))
_x = sm.add_constant(_x)
# 使用OLS做拟合
_model = regression.linear_model.OLS(_y, _x).fit()
return _model
model = regress_y(y)
b = model.params[0]
k = model.params[1]
# y = kx + b
y_fit = k * x + b
if show:
plt.plot(x, y)
plt.plot(x, y_fit, 'r')
plt.show()
# summary模型拟合概述,表6-1所示
print(model.summary())
return y_fit
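def ols_normal_equation_sketch():
    """
    Illustrative sketch, not part of the original chapter code: the k and b
    fitted above can also be obtained directly from the normal equations
    beta = (X'X)^-1 X'y, the closed-form least-squares solution.
    """
    x_mat = np.column_stack([np.ones_like(x, dtype=float), x])
    beta = np.linalg.inv(x_mat.T.dot(x_mat)).dot(x_mat.T).dot(y)
    # beta[0] is the intercept b, beta[1] is the slope k
    return beta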
# noinspection PyPep8Naming
def sample_611_2():
"""
    6.1.1 Linear regression
:return:
"""
y_fit = sample_611_1(show=False)
MAE = sum(np.abs(y - y_fit)) / len(y)
print('偏差绝对值之和(MAE)={}'.format(MAE))
MSE = sum(np.square(y - y_fit)) / len(y)
print('偏差绝对值之和(MSE)={}'.format(MSE))
RMSE = np.sqrt(sum(np.square(y - y_fit)) / len(y))
print('偏差绝对值之和(RMSE)={}'.format(RMSE))
from sklearn import metrics
print('sklearn偏差绝对值之和(MAE)={}'.format(metrics.mean_absolute_error(y, y_fit)))
print('sklearn偏差平方(MSE)={}'.format(metrics.mean_squared_error(y, y_fit)))
print('sklearn偏差平方和开平方(RMSE)={}'.format(np.sqrt(metrics.mean_squared_error(y, y_fit))))
# noinspection PyCallingNonCallable
def sample_612():
"""
    6.1.2 Polynomial regression
:return:
"""
import itertools
# 生成9个subplots 3*3
_, axs = plt.subplots(nrows=3, ncols=3, figsize=(15, 15))
# 将 3 * 3转换成一个线性list
axs_list = list(itertools.chain.from_iterable(axs))
# 1-9次多项式回归
poly = np.arange(1, 10, 1)
for p_cnt, ax in zip(poly, axs_list):
# 使用polynomial.Chebyshev.fit进行多项式拟合
p = np.polynomial.Chebyshev.fit(x, y, p_cnt)
# 使用p直接对x序列代人即得到拟合结果序列
y_fit = p(x)
# 度量mse值
from sklearn import metrics
mse = metrics.mean_squared_error(y, y_fit)
# 使用拟合次数和mse误差大小设置标题
ax.set_title('{} poly MSE={}'.format(p_cnt, mse))
ax.plot(x, y, '', x, y_fit, 'r.')
plt.show()
def sample_613():
"""
    6.1.3 Interpolation
:return:
"""
from scipy.interpolate import interp1d, splrep, splev
# 示例两种插值计算方式
_, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 5))
# 线性插值
linear_interp = interp1d(x, y)
# axs[0]左边的
axs[0].set_title('interp1d')
# 在相同坐标系下,同样的x,插值的y值使r.绘制(红色点)
axs[0].plot(x, y, '', x, linear_interp(x), 'r.')
# B-spline插值
splrep_interp = splrep(x, y)
# axs[1]右边的
axs[1].set_title('splrep')
# #在相同坐标系下,同样的x,插值的y值使g.绘制(绿色点)
axs[1].plot(x, y, '', x, splev(x, splrep_interp), 'g.')
plt.show()
"""
6.2 Monte Carlo methods and convex optimization
6.2.1 How much happiness can the pursuits of your life bring
"""
# 每个人平均寿命期望是75年,约75*365=27375天
K_INIT_LIVING_DAYS = 27375
class Person(object):
"""
    A human being
"""
def __init__(self):
# 初始化人平均能活的寿命
self.living = K_INIT_LIVING_DAYS
# 初始化幸福指数
self.happiness = 0
# 初始化财富值
self.wealth = 0
# 初始化名望权利
self.fame = 0
# 活着的第几天
self.living_day = 0
def live_one_day(self, seek):
"""
        Only one seek can be carried out per day; it decides what you pursue
        today and what you get out of it.
        The seek argument is an instance of one of the BaseSeekDay classes below.
:param seek:
:return:
"""
# 调用每个独特的BaseSeekDay类都会实现的do_seek_day,得到今天的收获
consume_living, happiness, wealth, fame = seek.do_seek_day()
# 每天要减去生命消耗,有些seek前面还会增加生命
self.living -= consume_living
# seek得到的幸福指数积累
self.happiness += happiness
# seek得到的财富积累
self.wealth += wealth
# seek得到的名望权力积累
self.fame += fame
# 活完这一天了
self.living_day += 1
class BaseSeekDay(six.with_metaclass(ABCMeta, object)):
def __init__(self):
# 每个追求每天消耗生命的常数
self.living_consume = 0
# 每个追求每天幸福指数常数
self.happiness_base = 0
# 每个追求每天财富积累常数
self.wealth_base = 0
# 每个追求每天名望权利积累常数
self.fame_base = 0
# 每个追求每天消耗生命的可变因素序列
self.living_factor = [0]
# 每个追求每天幸福指数的可变因素序列
self.happiness_factor = [0]
# 每个追求每天财富积累的可变因素序列
self.wealth_factor = [0]
# 每个追求每天名望权利的可变因素序列
self.fame_factor = [0]
# 追求了多少天了这一生
self.do_seek_day_cnt = 0
# 子类进行常数及可变因素序列设置
self._init_self()
@abstractmethod
def _init_self(self, *args, **kwargs):
# 子类必须实现,设置自己的生命消耗的常数,幸福指数常数等常数设置
pass
@abstractmethod
def _gen_living_days(self, *args, **kwargs):
# 子类必须实现,设置自己的可变因素序列
pass
def do_seek_day(self):
"""
        The concrete seek carried out on a single day
:return:
"""
# 生命消耗=living_consume:消耗常数 * happiness_factor:可变序列
if self.do_seek_day_cnt >= len(self.living_factor):
# 超出len(self.living_factor), 就取最后一个living_factor[-1]
consume_living = \
self.living_factor[-1] * self.living_consume
else:
# 每个类自定义这个追求的消耗生命常数,以及living_factor,比如
# HealthSeekDay追求健康,living_factor序列的值即由负值->正值
# 每个子类living_factor会有自己特点的变化速度及序列长度,导致每个
# 追求对生命的消耗随着追求的次数变化不一
consume_living = self.living_factor[self.do_seek_day_cnt] \
* self.living_consume
# 幸福指数=happiness_base:幸福常数 * happiness_factor:可变序列
if self.do_seek_day_cnt >= len(self.happiness_factor):
# 超出len(self.happiness_factor), 就取最后一个
# 由于happiness_factor值由:n—>0 所以happiness_factor[-1]=0
# 即随着追求一个事物的次数过多后会变的没有幸福感
happiness = self.happiness_factor[
-1] * self.happiness_base
else:
# 每个类自定义这个追求的幸福指数常数,以及happiness_factor
# happiness_factor子类的定义一般是从高->低变化
happiness = self.happiness_factor[
self.do_seek_day_cnt] * self.happiness_base
# 财富积累=wealth_base:积累常数 * wealth_factor:可变序列
if self.do_seek_day_cnt >= len(self.wealth_factor):
# 超出len(self.wealth_factor), 就取最后一个
wealth = self.wealth_factor[-1] * self.wealth_base
else:
# 每个类自定义这个追求的财富指数常数,以及wealth_factor
wealth = self.wealth_factor[
self.do_seek_day_cnt] * self.wealth_base
# 权利积累=fame_base:积累常数 * fame_factor:可变序列
if self.do_seek_day_cnt >= len(self.fame_factor):
# 超出len(self.fame_factor), 就取最后一个
fame = self.fame_factor[-1] * self.fame_base
else:
# 每个类自定义这个追求的名望权利指数常数,以及fame_factor
fame = self.fame_factor[
self.do_seek_day_cnt] * self.fame_base
# 追求了多少天了这一生 + 1
self.do_seek_day_cnt += 1
# 返回这个追求这一天对生命的消耗,得到的幸福,财富,名望权利
return consume_living, happiness, wealth, fame
def regular_mm(group):
# 最小-最大规范化
return (group - group.min()) / (group.max() - group.min())
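def regular_mm_sketch():
    """
    Illustrative sketch, not part of the original chapter code: regular_mm is a
    plain min-max scaler, so the smallest value maps to 0, the largest to 1,
    and everything in between falls linearly, e.g. [1, 2, 3] -> [0, 0.5, 1].
    """
    demo = regular_mm(pd.Series([1.0, 2.0, 3.0]))
    assert np.allclose(demo.values, [0.0, 0.5, 1.0])
    return demo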
"""
HealthSeekDay
"""
class HealthSeekDay(BaseSeekDay):
"""
    HealthSeekDay: a day spent pursuing health and longevity.
    Concretely: working out, travelling, entertainment, doing things you enjoy.
    Abstractly: the pursuit of health and a long life.
"""
def _init_self(self):
# 每天对生命消耗的常数=1,即代表1天
self.living_consume = 1
# 每天幸福指数常数=1
self.happiness_base = 1
# 设定可变因素序列
self._gen_living_days()
def _gen_living_days(self):
# 只生成12000个序列,因为下面的happiness_factor序列值由1->0
# 所以大于12000次的追求都将只是单纯消耗生命,并不增加幸福指数
# 即随着做一件事情的次数越来越多,幸福感越来越低,直到完全体会不到幸福
days = np.arange(1, 12000)
# 基础函数选用sqrt, 影响序列变化速度
living_days = np.sqrt(days)
"""
        The life-consumption factor sequence runs from -1 to 1: at the start this
        pursuit has a negative cost, i.e. it extends life, but as it is repeated
        more and more often the cost turns positive, because even someone who
        exercises and takes supplements every day still dies of natural causes one day.
"""
# *2-1的目的:regular_mm在0-1之间,HealthSeekDay要结果在-1,1之间
self.living_factor = regular_mm(living_days) * 2 - 1
# 结果在1-0之间 [::-1]: 将0->1转换到1->0
self.happiness_factor = regular_mm(days)[::-1]
def sample_621_1():
"""
    6.2.1_1 The story of your life: HealthSeekDay
:return:
"""
# 初始化我
me = Person()
# 初始化追求健康长寿快乐
seek_health = HealthSeekDay()
while me.living > 0:
# 只要还活着,就追求健康长寿快乐
me.live_one_day(seek_health)
print('只追求健康长寿快乐活了{}年,幸福指数{},积累财富{},名望权力{}'.format
(round(me.living_day / 365, 2), round(me.happiness, 2),
me.wealth, me.fame))
plt.plot(seek_health.living_factor * seek_health.living_consume)
plt.plot(seek_health.happiness_factor * seek_health.happiness_base)
plt.legend(['living_factor', 'happiness_factor'], loc='best')
plt.show()
"""
StockSeekDay
"""
class StockSeekDay(BaseSeekDay):
"""
    StockSeekDay: a day spent pursuing wealth and money.
    Concretely: making money by investing in stocks.
    Abstractly: the pursuit of wealth and money.
"""
def _init_self(self, show=False):
# 每天对生命消耗的常数=2,即代表2天
self.living_consume = 2
# 每天幸福指数常数=0.5
self.happiness_base = 0.5
# 财富积累常数=10,默认=0
self.wealth_base = 10
# 设定可变因素序列
self._gen_living_days()
def _gen_living_days(self):
# 只生成10000个序列
days = np.arange(1, 10000)
# 针对生命消耗living_factor的基础函数还是sqrt
living_days = np.sqrt(days)
# 由于不需要像HealthSeekDay从负数开始,所以直接regular_mm 即:0->1
self.living_factor = regular_mm(living_days)
# 针对幸福感可变序列使用了np.power4,即变化速度比sqrt快
happiness_days = np.power(days, 4)
# 幸福指数可变因素会快速递减由1->0
self.happiness_factor = regular_mm(happiness_days)[::-1]
"""
        Here we simply set wealth_factor = living_factor.
        Since living_factor runs over (0-1), wealth_factor does too: wealth
        accumulates faster and more efficiently later on; the first million is
        the hardest to earn.
"""
self.wealth_factor = self.living_factor
def sample_621_2():
"""
    6.2.1_2 The story of your life: StockSeekDay
:return:
"""
# 初始化我
me = Person()
# 初始化追求财富金钱
seek_stock = StockSeekDay()
while me.living > 0:
# 只要还活着,就追求财富金钱
me.live_one_day(seek_stock)
print('只追求财富金钱活了{}年,幸福指数{}, 积累财富{}, 名望权力{}'.format
(round(me.living_day / 365, 2), round(me.happiness, 2),
round(me.wealth, 2), me.fame))
plt.plot(seek_stock.living_factor * seek_stock.living_consume)
plt.plot(seek_stock.happiness_factor * seek_stock.happiness_base)
plt.legend(['living_factor', 'happiness_factor'], loc='best')
plt.show()
"""
FameSeekDay
"""
class FameSeekDay(BaseSeekDay):
"""
    FameSeekDay: a day spent pursuing fame and power.
    Abstractly: the pursuit of fame and power.
"""
def _init_self(self):
# 每天对生命消耗的常数=3,即代表3天
self.living_consume = 3
# 每天幸福指数常数=0.6
self.happiness_base = 0.6
# 名望权利积累常数=10,默认=0
self.fame_base = 10
# 设定可变因素序列
self._gen_living_days()
def _gen_living_days(self):
# 只生成12000个序列
days = np.arange(1, 12000)
# 针对生命消耗living_factor的基础函数还是sqrt
living_days = np.sqrt(days)
# 由于不需要像HealthSeekDay从负数开始,所以直接regular_mm 即:0->1
self.living_factor = regular_mm(living_days)
# 针对幸福感可变序列使用了np.power2
# 即变化速度比StockSeekDay慢但比HealthSeekDay快
happiness_days = np.power(days, 2)
# 幸福指数可变因素递减由1->0
self.happiness_factor = regular_mm(happiness_days)[::-1]
# 这里简单设定fame_factor=living_factor
self.fame_factor = self.living_factor
def sample_621_3():
"""
    6.2.1_3 The story of your life: FameSeekDay
:return:
"""
# 初始化我
me = Person()
# 初始化追求名望权力
seek_fame = FameSeekDay()
while me.living > 0:
# 只要还活着,就追求名望权力
me.live_one_day(seek_fame)
print('只追求名望权力活了{}年,幸福指数{}, 积累财富{}, 名望权力{}'.format
(round(me.living_day / 365, 2), round(me.happiness, 2),
round(me.wealth, 2), round(me.fame, 2)))
plt.plot(seek_fame.living_factor * seek_fame.living_consume)
plt.plot(seek_fame.happiness_factor * seek_fame.happiness_base)
plt.legend(['living_factor', 'happiness_factor'], loc='best')
plt.show()
"""
6.2.2 Using the Monte Carlo method to work out the happiest way to spend a lifetime
"""
def my_life(weights):
"""
    weights[0]: weight given to pursuing health, longevity and happiness
    weights[1]: weight given to pursuing wealth and money
    weights[2]: weight given to pursuing fame and power
"""
# 追求健康长寿快乐
seek_health = HealthSeekDay()
# 追求财富金钱
seek_stock = StockSeekDay()
# 追求名望权力
seek_fame = FameSeekDay()
# 放在一个list中对对应下面np.random.choice中的index[0, 1, 2]
seek_list = [seek_health, seek_stock, seek_fame]
# 初始化我
me = Person()
# 加权随机抽取序列。80000天肯定够了, 80000天快220年了。。。
seek_choice = np.random.choice([0, 1, 2], 80000, p=weights)
while me.living > 0:
# 追求从加权随机抽取序列已经初始化好的
seek_ind = seek_choice[me.living_day]
seek = seek_list[seek_ind]
# 只要还活着,就追求
me.live_one_day(seek)
return round(me.living_day / 365, 2), round(me.happiness, 2), round(me.wealth, 2), round(me.fame, 2)
def sample_622():
"""
    6.2.2 Using the Monte Carlo method to work out the happiest way to spend a lifetime
:return:
"""
living_day, happiness, wealth, fame = my_life([0.4, 0.3, 0.3])
print('活了{}年,幸福指数{}, 积累财富{}, 名望权力{}'.format(
living_day, happiness, wealth, fame))
from abupy import AbuProgress
progress = AbuProgress(2000, 0, label='my_life...')
result = []
for pos, _ in enumerate(xrange(2000)):
# 2000次随机权重分配
weights = np.random.random(3)
weights /= np.sum(weights)
# result中:tuple[0]权重weights,,tuple[1]my_life返回的结果
result.append((weights, my_life(weights)))
progress.show(a_progress=pos + 1)
# result中tuple[1]=my_life返回的结果, my_life[1]=幸福指数,so->x[1][1]
sorted_scores = sorted(result, key=lambda p_x: p_x[1][1], reverse=True)
# 将最优权重sorted_scores[0][0]代入my_life得到结果
living_day, happiness, wealth, fame = my_life(sorted_scores[0][0])
print('活了{}年,幸福指数{}, 积累财富{}, 名望权力{}'.format
(living_day, happiness, wealth, fame))
print('人生最优权重:追求健康{:.3f},追求财富{:.3f},追求名望{:.3f}'.format(
sorted_scores[0][0][0], sorted_scores[0][0][1],
sorted_scores[0][0][2]))
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
"""
    In result: tuple[0] is the weights, tuple[1] is the value returned by my_life
    r[0][0]: weight for pursuing health, longevity and happiness
    r[0][1]: weight for pursuing wealth and money
    r[0][2]: weight for pursuing fame and power
    r[1][1]: my_life[1] = happiness index
"""
result_show = np.array(
[[r[0][0], r[0][1], r[0][2], r[1][1]] for r in result])
fig = plt.figure(figsize=(9, 6))
ax = fig.gca(projection='3d')
ax.view_init(30, 60)
"""
    x: weight for pursuing health, y: weight for pursuing wealth
    z: weight for pursuing fame, c: color = happiness index, the darker the happier
"""
ax.scatter3D(result_show[:, 0], result_show[:, 1], result_show[:, 2],
c=result_show[:, 3], cmap='spring')
ax.set_xlabel('health')
ax.set_ylabel('stock')
ax.set_zlabel('fame')
plt.show()
# 幸福指数
happiness_result = result_show[:, 3]
# 使用qcut分10份
print('pd.qcut(happiness_result, 10).value_counts():\n', pd.qcut(happiness_result, 10).value_counts())
"""
6.2.3 Basic concepts of convex optimization
"""
# noinspection PyTypeChecker
def sample_623():
"""
    6.2.3 Trend skeleton plot
:return:
"""
import scipy.optimize as sco
from scipy.interpolate import interp1d
# 继续使用TSLA收盘价格序列
# interp1d线性插值函数
linear_interp = interp1d(x, y)
# 绘制插值
plt.plot(linear_interp(x))
# fminbound寻找给定范围内的最小值:在linear_inter中寻找全局最优范围1-504
global_min_pos = sco.fminbound(linear_interp, 1, 504)
# 绘制全局最优点,全局最小值点,r<:红色三角
plt.plot(global_min_pos, linear_interp(global_min_pos), 'r<')
# 每个单位都先画一个点,由两个点连成一条直线形成股价骨架图
last_postion = None
# 步长50,每50个单位求一次局部最小
for find_min_pos in np.arange(50, len(x), 50):
# fmin_bfgs寻找给定值的局部最小值
local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=0)
# 形成最小点位置信息(x, y)
draw_postion = (local_min_pos, linear_interp(local_min_pos))
# 第一个50单位last_postion=none, 之后都有值
if last_postion is not None:
# 将两两临近局部最小值相连,两个点连成一条直线
plt.plot([last_postion[0][0], draw_postion[0][0]],
[last_postion[1][0], draw_postion[1][0]], 'o-')
# 将这个步长单位内的最小值点赋予last_postion
last_postion = draw_postion
plt.show()
def sample_624():
"""
    6.2.4 Solving globally for the happiest way to spend a lifetime
:return:
"""
import scipy.optimize as sco
def minimize_happiness_global(weights):
if np.sum(weights) != 1:
# 过滤权重和不等于1的权重组合
return 0
# 最优都是寻找最小值,所以要得到幸福指数最大的权重,
# 返回-my_life,这样最小的结果其实是幸福指数最大的权重配比
return -my_life(weights)[1]
opt_global = sco.brute(minimize_happiness_global,
((0, 1.1, 0.1), (0, 1.1, 0.1), (0, 1.1, 0.1)))
print(opt_global)
living_day, happiness, wealth, fame = my_life(opt_global)
print('活了{}年,幸福指数{}, 积累财富{}, 名望权力{}'.format
(living_day, happiness, wealth, fame))
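def brute_grid_sketch():
    """
    Illustrative sketch, not part of the original chapter code: sco.brute simply
    evaluates the objective on the grid described by the (start, stop, step)
    ranges and then, by default, polishes the best grid point with fmin, so it
    recovers the minimum of this simple bowl near (0.3, 0.7).
    """
    import scipy.optimize as sco
    def bowl(w):
        return (w[0] - 0.3) ** 2 + (w[1] - 0.7) ** 2
    return sco.brute(bowl, ((0, 1.1, 0.1), (0, 1.1, 0.1)))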
# noinspection PyTypeChecker
def sample_625():
"""
    6.2.5 Computing the happiest way to spend a lifetime as a non-convex problem
:return:
"""
import scipy.optimize as sco
method = 'SLSQP'
# 提供一个函数来规范参数,np.sum(weights) = 1 -> np.sum(weights) - 1 = 0
constraints = ({'type': 'eq', 'fun': lambda p_x: np.sum(p_x) - 1})
# 参数的范围选定
bounds = tuple((0, 0.9) for _ in xrange(3))
print('bounds:', bounds)
def minimize_happiness_local(weights):
# print(weights)
return -my_life(weights)[1]
# 初始化猜测最优参数,这里使用brute计算出的全局最优参数作为guess
guess = [0.5, 0.2, 0.3]
opt_local = sco.minimize(minimize_happiness_local, guess,
method=method, bounds=bounds,
constraints=constraints)
print('opt_local:', opt_local)
# noinspection PyShadowingNames
def sample_626():
"""
    6.2.6 Finding the optimum of a standard convex function
:return:
"""
import scipy.optimize as sco
fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = Axes3D(fig)
x = np.arange(-10, 10, 0.5)
y = np.arange(-10, 10, 0.5)
x_grid, y_grid = np.meshgrid(x, y)
# z^2 = x^2 + y^2
z_grid = x_grid ** 2 + y_grid ** 2
ax.plot_surface(x_grid, y_grid, z_grid, rstride=1, cstride=1,
cmap='hot')
plt.show()
def convex_func(xy):
return xy[0] ** 2 + xy[1] ** 2
bounds = ((-10, 10), (-10, 10))
guess = [5, 5]
for method in ['SLSQP', 'TNC', 'L-BFGS-B']:
# 打印start
print(method + ' start')
# noinspection PyTypeChecker
ret = sco.minimize(convex_func, guess, method=method,
bounds=bounds)
print(ret)
# 这里通过np.allclose判定结果是不是(0, 0)
print('result is (0, 0): {}'.format(
np.allclose(ret['x'], [0., 0.], atol=0.001)))
# 打印end
print(method + ' end')
"""
6.3 Linear algebra
"""
# 获取多支股票数据组成panel
my_stock_df = ABuSymbolPd.make_kl_df(
['usBIDU', 'usGOOG', 'usFB', 'usAAPL', 'us.IXIC'], n_folds=2)
# 变换轴向,形成新的切面
my_stock_df = my_stock_df.swapaxes('items', 'minor')
my_stock_df_close = my_stock_df['close'].dropna(axis=0)
def regular_std(group):
# z-score规范化也称零-均值规范化
return (group - group.mean()) / group.std()
def sample_630():
"""
    Fetch data for several stocks and assemble it into a panel
:return:
"""
print('my_stock_df_close.tail():\n', my_stock_df_close.tail())
my_stock_df_close_std = regular_std(my_stock_df_close)
my_stock_df_close_std.plot()
plt.show()
def sample_631():
"""
    6.3.1 Matrix basics
:return:
"""
from scipy import linalg
# dataframe转换matrix通过as_matrix
cs_matrix = my_stock_df_close.as_matrix()
# cs_matrix本身有5列数据(5支股票),要变成方阵即保留5行数据0:5
cs_matrix = cs_matrix[0:5, :]
print('cs_matrix.shape:', cs_matrix.shape)
print('cs_matrix:\n', cs_matrix)
eye5 = np.eye(5)
print(eye5)
cs_matrix_inv = linalg.inv(cs_matrix)
print('逆矩阵: cs_matrix_inv')
print(cs_matrix_inv)
# 上面打印cs_matrix_inv输出上并非绝对标准单位矩阵,是对角线值元素接近与1,非对
# 角线元素接近与0的矩阵,需要使用np.allclose来确认结果
print('相乘后的结果是单位矩阵:{}'.format(
np.allclose(np.dot(cs_matrix, cs_matrix_inv), eye5)))
def sample_632():
"""
    6.3.2 Eigenvalues and eigenvectors
:return:
"""
from scipy import mat, linalg
a = mat('[1.5 -0.5; -0.5 1.5]')
u, d = linalg.eig(a)
print('特征值向量:{}'.format(u))
print('特征向量(列向量)矩阵:{}'.format(d))
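def eig_identity_sketch():
    """
    Illustrative sketch, not part of the original chapter code: for every
    eigenpair returned by linalg.eig, A.dot(v) equals the eigenvalue times v
    (up to floating point error), which is the defining property used above.
    """
    from scipy import linalg
    a = np.array([[1.5, -0.5], [-0.5, 1.5]])
    values, vectors = linalg.eig(a)
    for i in range(len(values)):
        v = vectors[:, i]
        assert np.allclose(a.dot(v), values[i] * v)
    return values, vectors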
def sample_634():
"""
    6.3.4 Example use of PCA and SVD
:return:
"""
from sklearn.decomposition import PCA
my_stock_df_close_std = regular_std(my_stock_df_close)
# n_components=1只保留一个维度
pca = PCA(n_components=1)
# 稍后会有展示fit_transform的实现,以及关键核心代码抽取
my_stock_df_trans_pca = \
pca.fit_transform(my_stock_df_close_std.as_matrix())
plt.plot(my_stock_df_trans_pca)
plt.show()
# 可视化维度和主成分关系,参数空
pca = PCA()
# 直接使用fit,不用fit_transform
pca.fit(my_stock_df_close_std)
# x:保留的维度 y:保留的维度下的方差比总和即保留了多少主成分
plt.plot(np.arange(1, len(pca.explained_variance_ratio_) + 1),
np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('component')
plt.ylabel('explained variance')
plt.show()
# 0.95即保留95%主成分
pca = PCA(0.95)
# 稍后会有展示fit_transform的实现,以及关键核心代码抽取
my_stock_df_trans_pca = \
pca.fit_transform(my_stock_df_close_std.as_matrix())
plt.plot(my_stock_df_trans_pca)
plt.show()
# noinspection PyPep8Naming
def my_pca(n_components=1):
from scipy import linalg
# svd奇异值分解
U, S, V = linalg.svd(my_stock_df_close_std.as_matrix(),
full_matrices=False)
# 通过n_components进行降维
U = U[:, :n_components]
U *= S[:n_components]
# 绘制降维后的矩阵
plt.plot(U)
# 输出如图6-19所示
my_pca(n_components=3)
plt.show()
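def pca_vs_svd_sketch(n_components=1):
    """
    Illustrative sketch, not part of the original chapter code: the PCA scores
    used above can be recovered from the SVD of the centered data as
    U[:, :k] * S[:k]; individual components may differ only by sign.
    """
    from scipy import linalg
    from sklearn.decomposition import PCA
    data = regular_std(my_stock_df_close).values
    data = data - data.mean(axis=0)
    u, s, _ = linalg.svd(data, full_matrices=False)
    svd_scores = u[:, :n_components] * s[:n_components]
    pca_scores = PCA(n_components=n_components).fit_transform(data)
    # allow for per-component sign flips between the two factorizations
    return np.allclose(np.abs(svd_scores), np.abs(pca_scores))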
if __name__ == "__main__":
sample_611_1()
# sample_611_2()
# sample_612()
# sample_613()
# sample_621_1()
# sample_621_2()
# sample_621_3()
# sample_622()
# sample_623()
# sample_624()
# sample_625()
# sample_626()
# sample_630()
# sample_631()
# sample_632()
# sample_634()
| gpl-3.0 |
jkarnows/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
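# Illustrative sketch, not part of the original example: the same fixed-point
# update used in centrality_scores, shown on a tiny dense row-stochastic matrix
# and stripped of damping and dangling-node handling.
def tiny_power_iteration(transition, n_iter=50):
    """Principal left eigenvector of a small row-stochastic matrix."""
    scores = np.ones(transition.shape[0]) / transition.shape[0]
    for _ in range(n_iter):
        scores = scores.dot(transition)  # same direction as `scores * X` above
        scores /= scores.sum()           # keep the vector L1-normalised
    return scores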
| bsd-3-clause |
jbkopecky/housebot | models/tda.py | 1 | 2498 | from pipelines import ItemSelector
from pipelines import MyOneHotEncoder
from pipelines import FindReplace
from sklearn.preprocessing import Imputer
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import numpy as np
import pandas as pd
import km
fr_arrondissement = [
("Le ", ""),
('-', "_"),
(' ', "_"),
('Saint', 'st'),
('_le_Pont', ''),
('_Perret', ''),
]
prepoc = Pipeline([
('Union', FeatureUnion([
('Surface', Pipeline([
('Selection', ItemSelector(['surface_m2'])),
('Normalise', MinMaxScaler()),
]),
),
('Max_Etages', Pipeline([
('Selection', ItemSelector(['etage', 'etage 2'])),
('Imputer', Imputer(strategy="most_frequent")),
('Normalise', MinMaxScaler()),
]),
),
('NoNaNFeats', Pipeline([
('Selection', ItemSelector(['piece'])),
('Normalise', MinMaxScaler()),
]),
),
('Description', Pipeline([
('Selection', ItemSelector('description')),
('vect', CountVectorizer(max_features=500)),
('tfidf', TfidfTransformer()),
]),
),
]),
),
])
data = pd.read_csv('./data/merged_data.csv', index_col=0)
n = len(data)
data = data.dropna(subset=['surface_m2', 'piece'])
print "[Warning] dropped %s samples because of NaN values" % (n-len(data))
X = np.array(prepoc.fit_transform(data).todense())
mapper = km.KeplerMapper(verbose=2)
projected_data = mapper.fit_transform(X, projection="dist_mean")
complex = mapper.map(projected_X=projected_data, inverse_X=X,
clusterer=km.cluster.DBSCAN(
eps=0.8,
n_jobs=-1,
min_samples=10
),
nr_cubes=10, overlap_perc=0.9
)
mapper.visualize(complex, path_html="./plots/test_tda.html",
title="Test",
color_function="average_signal_cluster"
)
| mit |
ajmendez/templog | bin/get_history.py | 1 | 1529 | #!/usr/bin/env python
import os
import json
import urllib2
from pymendez import auth
from matplotlib.dates import date2num
from datetime import datetime, timedelta
# URL = 'http://api.wunderground.com/api/{key}/history_{date}/q/{state}/{city}.json'
URL = 'http://api.wunderground.com/api/{key}/history_{date}/q/{station}.json'
outfile = os.path.expanduser('~/data/weather.json')
key = auth('weatherunderground', 'key')
# state = 'MD'
# city = 'Baltimore'
station='pws:KMDBALTI35'
date = datetime.now()
def get_weather(d):
# url = URL.format(key=key, date=d, state=state, city=city)
url = URL.format(key=key, date=d, station=station)
print url
    f = urllib2.urlopen(url)
    json_string = f.read()
    # close the response before returning so the handle is not leaked
    f.close()
    parsed_json = json.loads(json_string)
    return parsed_json
try:
data = json.load(open(outfile, 'r'))
except:
data = {}
try:
for i in range(1,30):
print i
d = '{0.year}{0.month:02d}{0.day:02d}'.format(date-timedelta(days=i))
tmp = get_weather(d)
for obs in tmp['history']['observations']:
k = d+obs['date']['hour']+obs['date']['min']
print ' ',datetime.strptime(k, '%Y%m%d%H%M')
k = date2num(datetime.strptime(k, '%Y%m%d%H%M'))
if k in data:
if d not in ['20141102']:
raise ValueError('Already observed data')
data[k] = obs
except Exception as e:
print 'is Done?'
raise
finally:
json.dump(data, open(outfile,'w'), indent=2)
| gpl-2.0 |
mia1rab/seaborn | doc/sphinxext/plot_directive.py | 38 | 27578 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
   Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context:close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
plot_formats = eval(plot_formats)
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format),
dpi=dpi,
bbox_inches="tight")
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # create the builder's output directory if needed
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
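# Usage sketch for the directive implemented above (the reST shown is
# illustrative; the referenced file path is an assumption):
#
#   .. plot:: gallery/example.py
#      :include-source:
#
#   .. plot::
#      :context: close-figs
#
#      import matplotlib.pyplot as plt
#      plt.plot([1, 2, 3])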
| bsd-3-clause |
kevin-intel/scikit-learn | asv_benchmarks/benchmarks/decomposition.py | 12 | 2754 | from sklearn.decomposition import (PCA, DictionaryLearning,
MiniBatchDictionaryLearning)
from .common import Benchmark, Estimator, Transformer
from .datasets import _olivetti_faces_dataset, _mnist_dataset
from .utils import make_pca_scorers, make_dict_learning_scorers
class PCABenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for PCA.
"""
param_names = ['svd_solver']
params = (['full', 'arpack', 'randomized'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _mnist_dataset()
def make_estimator(self, params):
svd_solver, = params
estimator = PCA(n_components=32,
svd_solver=svd_solver,
random_state=0)
return estimator
def make_scorers(self):
make_pca_scorers(self)
class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for DictionaryLearning.
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = DictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
max_iter=20,
tol=1e-16,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
class MiniBatchDictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchDictionaryLearning
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = MiniBatchDictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
batch_size=3,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
| bsd-3-clause |
idanivanov/catdtree | tests/classification/test_C45.py | 1 | 1664 | from nose import with_setup
from nose.tools import assert_equal
import pandas as pd
from catdtree.classification import C45
def test_fit():
tree_str_exp = u'''Root
|--> Weight <= 58
| |--> Eye Color is Green
| |--> Eye Color is Blue
|--> Weight > 58
| |--> Height <= 1.9
| | |--> Money in Bank <= 32456
| | | |--> Age <= 32
| | | | |--> Sex is Female
| | | | | |--> Age <= 28
| | | | | | |--> Eye Color is Brown
| | | | | | |--> Eye Color is Blue
| | | | | | |--> Eye Color is Green
| | | | | |--> Age > 28
| | | | |--> Sex is Male
| | | | | |--> Age <= 28
| | | | | | |--> Eye Color is Brown
| | | | | | |--> Eye Color is Blue
| | | | | | |--> Eye Color is Green
| | | | | |--> Age > 28
| | | |--> Age > 32
| | | | |--> Sex is Female
| | | | | |--> Eye Color is Blue
| | | | | |--> Eye Color is Green
| | | | |--> Sex is Male
| | | | | |--> Eye Color is Blue
| | | | | |--> Eye Color is Green
| | |--> Money in Bank > 32456
| | | |--> Eye Color is Brown
| | | |--> Eye Color is Blue
| |--> Height > 1.9
| | |--> Eye Color is Brown
| | |--> Eye Color is Blue
'''
hot_data = pd.read_csv('tests/hot.csv')
X, y = hot_data.drop('Hot', axis=1), hot_data['Hot']
model = C45()
model.fit(X, y)
tree_str = model.tree.show()
assert tree_str_exp == tree_str, 'The tree was not built as expected.'
| mit |
ojdo/rivus | rivus/gridder/create_grid.py | 2 | 12130 | # -*- coding: utf-8 -*-
import numpy as np
from itertools import product as iter_product
from shapely.geometry import Point, LineString
from geopandas import GeoDataFrame
from math import ceil
from geopy.distance import distance
from geopy import Point as gPoint
from pyproj import Proj
def _gen_grid_edges(point_matrix):
'''Connecting vertices in a chessboard manner
.. code-block:: none
0 0 0 0--0--0 0--0--0
| | |
0 0 0 -> 0--0--0 -> 0--0--0
| | |
0 0 0 0--0--0 0--0--0
Parameters
----------
    point_matrix : numpy.ndarray
        Two-dimensional array (matrix) with the coordinates of the vertices.
Returns
-------
list of Shapely.LineString
The connecting edges between vertices.
'''
lines = []
for row in point_matrix:
lines.extend([LineString(coords) for coords in zip(row[:-1], row[1:])])
for row in np.transpose(point_matrix, (1, 0, 2)):
lines.extend([LineString(coords) for coords in zip(row[:-1], row[1:])])
return lines
def _check_input(origo_latlon, num_edge_x, num_edge_y, dx, dy, noise_prop):
if len(origo_latlon) != 2 or not all([isinstance(c, (int, float))
for c in origo_latlon]):
        raise TypeError('origo_latlon must be a pair of numeric (lat, lon) coordinates')
if all([a < 1 for a in (num_edge_x, num_edge_y)]):
raise ValueError('Both of the edge dimensions cannot be <1.')
if any([a < 0 for a in (dx, dy, noise_prop)]):
raise ValueError('dx, dy, noise_prop must be positive numbers.')
def create_square_grid(origo_latlon=(48.26739, 11.66842), num_edge_x=1,
num_edge_y=None, dx=100, dy=None, noise_prop=0.0,
epsg=None, match=0):
'''Create chessboard grid with edges and vertices
on WGS84 suface with vincenty distance calculation
lat ~ x, lon ~ y
Parameters
----------
origo_latlon : tuple, optional
WGS84 latlon coordinates of the bottom left grid point
        defaults to a point at the TUM-ENS department ;]
num_edge_x : int, optional
how many edges horizontally
    num_edge_y : int, optional
        How many edges vertically (defaults to num_edge_x)
    dx : int, optional
        length of the horizontal edges (in meters)
    dy : int, optional
        length of the vertical edges (in meters, defaults to dx)
    noise_prop : float, optional
        0.0 to MAX_NOISE (< 1.0) misplacement radius relative to dx and dy.
epsg : int, optional
If a valid epsg code which is supported py pyproy,
the coordinates are calculated in the carthesian UTM CRS
and then transformed into epsg4326 (latlon).
If `None` or omitted, then the coordinates are calculated
directly in epsg4326 with vincenty's formula for distance
and the grid lines up with the North and East directions
match : enumerated values, optional
+ `0` - vertices and edges are matched by the logic of generation
(faster as less calculation is needed.)
        + `1` - matching is done geographically
with pandashp helper (slower, but flexible)
Return
------
list of GeoDataFrames
+ vertices : with [geometry, Vertex] columns
+ edges : with [geometry, Edge, Vertex1, Vertex2] columns
Note
----
Sequence of IDs:
        From bottom left to upper right.
From row to row.
From left to right.
.. code-block:: none
bearing 0
(6)══04══(7)══05══(8)
║ ║ ║
7 9 11
║ ║ ║
    (3)══02══(4)══03══(5)
^ ║ ║ ║
(y) 6 8 10
L ║ ║ ║
A (0)══00══(1)══01══(2)
T
LON (x) -> bearing 90
Raises
------
ValueError
Not supported epsg number
'''
# INIT
# ---- Grid structure
lat, lon = origo_latlon
dy = dx if not dy else dy
num_edge_y = num_edge_x if not num_edge_y else num_edge_y
num_vert_x = num_edge_x + 1
num_vert_y = num_edge_y + 1
match = 0 if match not in [0, 1] else match
# ---- Noise
MAX_NOISE = 0.45 # relative to dx dy
fuzz_radius_x = dx * noise_prop
fuzz_radius_y = dy * noise_prop
if noise_prop > MAX_NOISE:
fuzz_radius_x = MAX_NOISE * dx
fuzz_radius_y = MAX_NOISE * dy
_check_input(origo_latlon, num_edge_x, num_edge_y, dx, dy, noise_prop)
# Generate offset point coordinates
if epsg is None: # in LatLon system
# getting new points based on https://stackoverflow.com/a/24429798
# Convert to geopy distance
crsinit = {'init': 'epsg:4326'}
dx = distance(meters=dx)
dy = distance(meters=dy)
points = []
startp = gPoint([lat, lon])
# create the grid coordinates
for _ in range(num_vert_y):
# In lon(x), lat(y) order to be passed to Shapeley.Point()
# Bearing 90->East 0->North
points.append([startp.longitude, startp.latitude])
_startp = startp
for _ in range(num_edge_x):
_startp = dx.destination(point=_startp, bearing=90)
points.append([_startp.longitude, _startp.latitude])
startp = dy.destination(point=startp, bearing=0)
else: # in UTM XY coord system
try:
UTMXX = Proj(init='epsg:{}'.format(epsg))
crsinit = {'init': 'epsg:{}'.format(epsg)} # for GeoDataFrame
except:
            raise ValueError('Not supported epsg number, '
                             'only Proj4 init epsg numbers are supported')
ox, oy = UTMXX(lon, lat)
coords_x = np.arange(ox, ox + (dx * num_vert_x), dx)
coords_y = np.arange(oy, oy + (dy * num_vert_y), dy)
points = [(x, y) for y, x in iter_product(coords_y, coords_x,
repeat=1)]
# Add fuzz
if noise_prop > 0.0:
def _fuzz(xy):
if epsg is not None:
return [xy[ii] + lim * (2 * np.random.rand() - 1)
for ii, lim in enumerate((fuzz_radius_x,
fuzz_radius_y))]
else:
lon, lat = xy
fromP = gPoint([lat, lon])
lon_dist = distance(meters=(fuzz_radius_x * np.random.rand()))
lat_dist = distance(meters=(fuzz_radius_y * np.random.rand()))
newX = lon_dist.destination(point=fromP, bearing=90)
newY = lat_dist.destination(point=fromP, bearing=0)
return [newX.longitude, newY.latitude]
points = list(map(lambda xy: _fuzz(xy), points))
# Create Shapely objects
vertices = [Point(coo) for coo in points]
# reshape(num_rows, num_cols) --> num_vert_y is the number of rows.
# As it counts the elements in a column along the y axis.
point_matrix = np.array(points).reshape(num_vert_y, num_vert_x, 2)
edges = _gen_grid_edges(point_matrix)
# Create GeoDataFrames
vdf = GeoDataFrame(geometry=vertices, crs=crsinit)
vdf['Vertex'] = vdf.index # ; vdf.set_index('Vertex', inplace=True)
edf = GeoDataFrame(geometry=edges, crs=crsinit)
edf['Edge'] = edf.index # ; edf.set_index('Edge', inplace=True)
# Match Vertex1 and Vertex2 columns to Vertex index
if match == 1:
from ..utils import pandashp as pdshp # to match vertices and edges
pdshp.match_vertices_and_edges(vdf, edf)
elif match == 0:
v1s = []
v2s = []
indices = np.arange(
num_vert_x * num_vert_y).reshape(num_vert_y, num_vert_x)
for row in indices:
v1s.extend(row[:-1])
v2s.extend(row[1:])
for col in indices.T:
v1s.extend(col[:-1])
v2s.extend(col[1:])
edf['Vertex1'] = v1s
edf['Vertex2'] = v2s
if epsg is not None:
vdf.to_crs(epsg=4326, inplace=True)
edf.to_crs(epsg=4326, inplace=True)
return (vdf, edf)
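# Usage sketch (parameter values are assumptions): a 3x2-edge grid computed in
# UTM zone 32N, with vertices and edges matched by the generation logic.
#
#   vertices, edges = create_square_grid(origo_latlon=(48.26739, 11.66842),
#                                        num_edge_x=3, num_edge_y=2,
#                                        dx=100, dy=100, epsg=32632, match=0)
#   # vertices: [geometry, Vertex]; edges: [geometry, Edge, Vertex1, Vertex2],
#   # both returned in epsg:4326.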
def get_source_candidates(vdf, dim_x, dim_y, logic='sym'):
"""Calculate the set of indexes of the vertices, which are worth testing
as source vertex in a single commodity case. A square grid is assumed.
"Worth" means:
The minimal set of vertices which cover the main symmetrical positions.
Parameters
----------
vdf : pandas DataFrame
The vertex frame. (Created by create_square_grid())
dim_x : int
Number of vertices along the x axis.
dim_y : int
Number of vertices along the y axis.
logic : str, optional default='sym'
what kind or source candidates are looked for.
+ sym - Minimal(ish) set of vertices based on symmetry.
E.g. here the indices marked with * are selected.
::
18, 19, 20, 21, 22, 23
12, 13, 14, 15, 16, 17
*6, *7, *8, 9, 10, 11
*0, *1, *2, 3, 4, 5
+ extrema - Pairs of vertices possibly further away from each other.
Say: combination of the corners.
0: diagonal (0-23)
1: x-edge (0-5)
2: y-edge (0-18) if x-y have different lengths
+ center - One corner and one center-ish ID
Returns
-------
List of different dimensions
    + sym : 1D list, e.g. [0, 1, 2, 6, 7, 8]
+ extrema, center : 2D list - list of lists [[0,23],[0,5],[0,18]]
Raises
------
ValueError
Unsupported source vertex calculation logic
"""
mat = vdf.index.values.reshape(dim_y, dim_x)
lim_x = ceil(dim_x / 2)
lim_y = ceil(dim_y / 2)
if logic == 'sym':
return mat[0:lim_y, 0:lim_x].flatten().tolist()
elif logic == 'center':
return [[0, mat[lim_y, lim_x]], ]
elif logic == 'extrema':
corners = [(0, 0),
(0, dim_x - 1),
(dim_y - 1, 0),
(dim_y - 1, dim_x - 1)]
if dim_y == dim_x:
borders = [[mat[corners[0]], mat[corners[3]]],
[mat[corners[0]], mat[corners[1]]]]
else:
borders = [[mat[corners[0]], mat[corners[3]]],
[mat[corners[0]], mat[corners[1]]],
[mat[corners[0]], mat[corners[2]]]]
return borders
else:
raise ValueError('Unsupported source vertex calculation logic: <{}>'
.format(logic))
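# Sketch (assumed 6x4 vertex grid, i.e. dim_x=6, dim_y=4):
#
#   vdf, edf = create_square_grid(num_edge_x=5, num_edge_y=3)
#   get_source_candidates(vdf, dim_x=6, dim_y=4, logic='sym')
#   # -> [0, 1, 2, 6, 7, 8], the lower-left quadrant marked in the docstring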
# Run Examples / Tests if script is executed directly
if __name__ == '__main__':
import matplotlib.pyplot as plt
test0ver, test0edg = create_square_grid(
num_edge_x=3, num_edge_y=2, noise_prop=0.0, epsg=32632)
# test1ver, test1edg = create_square_grid(num_edge_x=6, noise_prop=0.1, epsg=32632)
# test2ver, test2edg = create_square_grid(num_edge_x=6, noise_prop=0.2)
# test3ver, test3edg = create_square_grid(num_edge_x=6, noise_prop=0.3)
# test4ver, test4edg = create_square_grid(num_edge_x=6, noise_prop=0.4)
# test5ver, test5edg = create_square_grid(num_edge_x=6, noise_prop=.45)
fig, axes = plt.subplots(2, 3, figsize=(10, 6))
for ij in iter_product(range(2), repeat=2):
axes[ij].set_aspect('equal')
test0ver.plot(ax=axes[0, 0], marker='o', color='red', markersize=5)
test0edg.plot(ax=axes[0, 0], color='blue')
# test1ver.plot(ax=axes[0, 1], marker='o', color='red', markersize=5)
# test1edg.plot(ax=axes[0, 1], color='blue')
# test2ver.plot(ax=axes[0, 2], marker='o', color='red', markersize=5)
# test2edg.plot(ax=axes[0, 2], color='blue')
# test3ver.plot(ax=axes[1, 0], marker='o', color='red', markersize=5)
# test3edg.plot(ax=axes[1, 0], color='blue')
# test4ver.plot(ax=axes[1, 1], marker='o', color='red', markersize=5)
# test4edg.plot(ax=axes[1, 1], color='blue')
# test5ver.plot(ax=axes[1, 2], marker='o', color='red', markersize=5)
# test5edg.plot(ax=axes[1, 2], color='blue')
| gpl-3.0 |
WuShichao/computational-physics | 3/3_27/3_27.py | 1 | 1365 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 20:23:34 2016
Phase space plot of the Lorenz model, z versus x when y = 0,
using the Euler method.
@author: nightwing
"""
import matplotlib.pyplot as plt
def LORENZ_MODEL(x, y, z, r):
delta = 10 # argument delta
b = 8.0 / 3 # argument b
t = 0 # initial time
dt = 0.0001 # time step
t_end = 800 # end of time
displacement_1 = [] # this list store displacement 1
displacement_2 = [] # this list store displacement 2
    while t <= t_end:
        if t >= 30:  # allow for the decay of initial transients
            if abs(y) < 0.001:
                displacement_1.append(x)
                displacement_2.append(z)
        # Euler step: evaluate every derivative at the current (x, y, z)
        # before updating, so no component uses an already-updated value
        dx = (delta * (y - x)) * dt
        dy = (- x * z + r * x - y) * dt
        dz = (x * y - b * z) * dt
        x, y, z = x + dx, y + dy, z + dz
        t += dt
return [displacement_1, displacement_2]
# -----------------calculate------------------
init_x = 0
init_y = 1
init_z = 1
phase_space = LORENZ_MODEL(init_x, init_y, init_z, 25)
# --------------graph---------------
plt.figure(figsize=(8, 6))
plt.title("Phase space plot: z versus x when y = 0")
plt.text(-15, 5, "x = %d, y = %d, z = %d" % (init_x, init_y, init_z))
plt.xlabel("x")
plt.ylabel("z")
plt.scatter(phase_space[0], phase_space[1], s=1)
plt.xlim(-20, 20)
plt.ylim(0, 40)
plt.show()
| gpl-3.0 |
nmayorov/scikit-learn | sklearn/gaussian_process/gpr.py | 43 | 18642 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard sklearn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify
the noise level directly as a parameter is mainly for convenience and
for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, also its
standard deviation (return_std=True) or covariance (return_cov=True).
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self.y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
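# Usage sketch (toy data; the kernel choice and noise level are assumptions):
#
#   X = np.linspace(0, 5, 20)[:, np.newaxis]
#   y = np.sin(X).ravel()
#   gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(length_scale=1.0),
#                                  alpha=1e-6).fit(X, y)
#   y_mean, y_std = gpr.predict(X, return_std=True)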
| bsd-3-clause |
adammenges/statsmodels | statsmodels/sandbox/tsa/diffusion.py | 31 | 18732 | '''getting started with diffusions, continuous time stochastic processes
Author: josef-pktd
License: BSD
References
----------
An Algorithmic Introduction to Numerical Simulation of Stochastic Differential
Equations
Author(s): Desmond J. Higham
Source: SIAM Review, Vol. 43, No. 3 (Sep., 2001), pp. 525-546
Published by: Society for Industrial and Applied Mathematics
Stable URL: http://www.jstor.org/stable/3649798
http://www.sitmo.com/ especially the formula collection
Notes
-----
OU process: use same trick for ARMA with constant (non-zero mean) and drift
some of the processes have easy multivariate extensions
*Open Issues*
include xzero in returned sample or not? currently not
*TODOS*
* Milstein from Higham paper, for which processes does it apply
* Maximum Likelihood estimation
* more statistical properties (useful for tests)
* helper functions for display and MonteCarlo summaries (also for testing/checking)
* more processes for the menagerie (e.g. from empirical papers)
* characteristic functions
* transformations, non-linear e.g. log
* special estimators, e.g. Ait Sahalia, empirical characteristic functions
* fft examples
* check naming of methods, "simulate", "sample", "simexact", ... ?
stochastic volatility models: estimation unclear
finance applications ? option pricing, interest rate models
'''
from __future__ import print_function
import numpy as np
from scipy import stats, signal
import matplotlib.pyplot as plt
#np.random.seed(987656789)
class Diffusion(object):
'''Wiener Process, Brownian Motion with mu=0 and sigma=1
'''
def __init__(self):
pass
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
dt = T*1.0/nobs
t = np.linspace(dt, 1, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
initially test example from
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
return U, Umean, t
class AffineDiffusion(Diffusion):
'''
differential equation:
:math::
dx_t = f(t,x)dt + \sigma(t,x)dW_t
integral:
:math::
x_T = x_0 + \\int_{0}^{T}f(t,S)dt + \\int_0^T \\sigma(t,S)dW_t
TODO: check definition, affine, what about jump diffusion?
'''
def __init__(self):
pass
def sim(self, nobs=100, T=1, dt=None, nrepl=1):
# this doesn't look correct if drift or sig depend on x
# see arithmetic BM
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dx = self._drift() + self._sig() * W
x = np.cumsum(dx,1)
xmean = x.mean(0)
return x, xmean, t
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
if dt is None:
dt = T*1.0/nobs
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dW = self.dW
t = np.linspace(dt, 1, nobs)
Dt = Tratio*dt;
L = nobs/Tratio; # L EM steps of size Dt = R*dt
Xem = np.zeros((nrepl,L)); # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
return Xem
'''
R = 4; Dt = R*dt; L = N/R; % L EM steps of size Dt = R*dt
Xem = zeros(1,L); % preallocate for efficiency
Xtemp = Xzero;
for j = 1:L
Winc = sum(dW(R*(j-1)+1:R*j));
Xtemp = Xtemp + Dt*lambda*Xtemp + mu*Xtemp*Winc;
Xem(j) = Xtemp;
end
'''
class ExactDiffusion(AffineDiffusion):
'''Diffusion that has an exact integral representation
this is currently mainly for geometric, log processes
'''
def __init__(self):
pass
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter doesn't handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.norm(loc=meant, scale=stdt)
class ArithmeticBrownian(AffineDiffusion):
'''
:math::
dx_t &= \\mu dt + \\sigma dW_t
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
return self.mu
def _sig(self, *args, **kwds):
return self.sigma
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
        inc = self.mu * ddt + self.sigma * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
return xzero + np.cumsum(inc,1)
def exactdist(self, xzero, t):
        meant = self.mu * t
        stdt = self.sigma * np.sqrt(t)
return stats.norm(loc=meant, scale=stdt)
class GeometricBrownian(AffineDiffusion):
'''Geometric Brownian Motion
:math::
dx_t &= \\mu x_t dt + \\sigma x_t dW_t
$x_t $ stochastic process of Geometric Brownian motion,
$\mu $ is the drift,
$\sigma $ is the Volatility,
$W$ is the Wiener process (Brownian motion).
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.mu * x
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
class OUprocess(AffineDiffusion):
'''Ornstein-Uhlenbeck
:math::
dx_t&=\\lambda(\\mu - x_t)dt+\\sigma dW_t
mean reverting process
TODO: move exact higher up in class hierarchy
'''
def __init__(self, xzero, mu, lambd, sigma):
self.xzero = xzero
self.lambd = lambd
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.lambd * (self.mu - x)
def _sig(self, *args, **kwds):
        # additive noise: sigma does not scale with x, matching the SDE above
        return self.sigma
def exact(self, xzero, t, normrvs):
#TODO: aggregate over time for process with observations for all t
# i.e. exact conditional distribution for discrete time increment
# -> exactprocess
#TODO: for single t, return stats.norm -> exactdist
expnt = np.exp(-self.lambd * t)
return (xzero * expnt + self.mu * (1-expnt) +
self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd) * normrvs)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter doesn't handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
#TODO: aggregate over time for process with observations for all t
#TODO: for single t, return stats.norm
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self.mu * (1-expnt)
stdt = self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd)
from scipy import stats
return stats.norm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:])
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
return mu, lambd, sigma
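# Sketch (parameter values are assumptions): simulate an OU path with the exact
# discretization and recover (mu, lambd, sigma) with the least-squares fit above.
#
#   ou = OUprocess(xzero=0., mu=1., lambd=0.5, sigma=0.1)
#   path = ou.exactprocess(0., 2000, ddt=0.1, nrepl=1)[0]
#   mu_hat, lambd_hat, sigma_hat = ou.fitls(path, dt=0.1)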
class SchwartzOne(ExactDiffusion):
'''the Schwartz type 1 stochastic process
:math::
dx_t = \\kappa (\\mu - \\ln x_t) x_t dt + \\sigma x_tdW \\
The Schwartz type 1 process is a log of the Ornstein-Uhlenbeck stochastic
process.
'''
def __init__(self, xzero, mu, kappa, sigma):
self.xzero = xzero
self.mu = mu
self.kappa = kappa
self.lambd = kappa #alias until I fix exact
self.sigma = sigma
def _exactconst(self, expnt):
return (1-expnt) * (self.mu - self.sigma**2 / 2. /self.kappa)
def _exactstd(self, expnt):
return self.sigma * np.sqrt((1-expnt*expnt)/2./self.kappa)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
lnxzero = np.log(xzero)
lnx = super(self.__class__, self).exactprocess(xzero, nobs, ddt=ddt, nrepl=nrepl)
return np.exp(lnx)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
#TODO: check this is still wrong, just guessing
meant = np.log(xzero) * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.lognorm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]))
const, slope = parest
errvar = res/(nobs-2.) #check denominator estimate, of sigma too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,): mu = mu[0] # how to remove scalar array ?
if np.shape(sigma)== (1,): sigma = sigma[0]
#mu, kappa are good, sigma too small
return mu, kappa, sigma
class BrownianBridge(object):
def __init__(self):
pass
def simulate(self, x0, x1, nobs, nrepl=1, ddt=1., sigma=1.):
nobs=nobs+1
dt = ddt*1./nobs
t = np.linspace(dt, ddt-dt, nobs)
t = np.linspace(dt, ddt, nobs)
wm = [t/ddt, 1-t/ddt]
#wmi = wm[1]
#wm1 = x1*wm[0]
wmi = 1-dt/(ddt-t)
wm1 = x1*(dt/(ddt-t))
su = sigma* np.sqrt(t*(1-t)/ddt)
s = sigma* np.sqrt(dt*(ddt-t-dt)/(ddt-t))
x = np.zeros((nrepl, nobs))
x[:,0] = x0
rvs = s*np.random.normal(size=(nrepl,nobs))
for i in range(1,nobs):
x[:,i] = x[:,i-1]*wmi[i] + wm1[i] + rvs[:,i]
return x, t, su
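# Sketch (values are assumptions): 500 bridge paths pinned at x0=0 and x1=0.5;
# the returned su is the theoretical pointwise standard deviation that the
# __main__ example below compares against the simulated one.
#
#   bb = BrownianBridge()
#   paths, t, su = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)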
class CompoundPoisson(object):
'''nobs iid compound poisson distributions, not a process in time
'''
def __init__(self, lambd, randfn=np.random.normal):
if len(lambd) != len(randfn):
raise ValueError('lambd and randfn need to have the same number of elements')
self.nobj = len(lambd)
self.randfn = randfn
self.lambd = np.asarray(lambd)
def simulate(self, nobs, nrepl=1):
nobj = self.nobj
x = np.zeros((nrepl, nobs, nobj))
N = np.random.poisson(self.lambd[None,None,:], size=(nrepl,nobs,nobj))
for io in range(nobj):
randfnc = self.randfn[io]
nc = N[:,:,io]
#print nrepl,nobs,nc
#xio = randfnc(size=(nrepl,nobs,np.max(nc))).cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
rvs = randfnc(size=(nrepl,nobs,np.max(nc)))
print('rvs.sum()', rvs.sum(), rvs.shape)
xio = rvs.cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
#print xio.shape
x[:,:,io] = xio
x[N==0] = 0
return x, N
'''
randn('state',100) % set the state of randn
T = 1; N = 500; dt = T/N; t = [dt:dt:1];
M = 1000; % M paths simultaneously
dW = sqrt(dt)*randn(M,N); % increments
W = cumsum(dW,2); % cumulative sum
U = exp(repmat(t,[M 1]) + 0.5*W);
Umean = mean(U);
plot([0,t],[1,Umean],'b-'), hold on % plot mean over M paths
plot([0,t],[ones(5,1),U(1:5,:)],'r--'), hold off % plot 5 individual paths
xlabel('t','FontSize',16)
ylabel('U(t)','FontSize',16,'Rotation',0,'HorizontalAlignment','right')
legend('mean of 1000 paths','5 individual paths',2)
averr = norm((Umean - exp(9*t/8)),'inf') % sample error
'''
if __name__ == '__main__':
doplot = 1
nrepl = 1000
examples = []#['all']
if 'all' in examples:
w = Diffusion()
# Wiener Process
# ^^^^^^^^^^^^^^
ws = w.simulateW(1000, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(ws[0].T)
tmp = plt.plot(ws[0].mean(0), linewidth=2)
plt.title('Standard Brownian Motion (Wiener Process)')
func = lambda t, W: np.exp(t + 0.5*W)
us = w.expectedsim(func, nobs=500, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(us[0].T)
tmp = plt.plot(us[1], linewidth=2)
plt.title('Brownian Motion - exp')
#plt.show()
averr = np.linalg.norm(us[1] - np.exp(9*us[2]/8.), np.inf)
print(averr)
#print us[1][:10]
#print np.exp(9.*us[2][:10]/8.)
# Geometric Brownian
# ^^^^^^^^^^^^^^^^^^
gb = GeometricBrownian(xzero=1., mu=0.01, sigma=0.5)
gbs = gb.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(gbs.T)
tmp = plt.plot(gbs.mean(0), linewidth=2)
plt.title('Geometric Brownian')
plt.figure()
tmp = plt.plot(np.log(gbs).T)
tmp = plt.plot(np.log(gbs.mean(0)), linewidth=2)
plt.title('Geometric Brownian - log-transformed')
ab = ArithmeticBrownian(xzero=1, mu=0.05, sigma=1)
abs = ab.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(abs.T)
tmp = plt.plot(abs.mean(0), linewidth=2)
plt.title('Arithmetic Brownian')
# Ornstein-Uhlenbeck
# ^^^^^^^^^^^^^^^^^^
ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
ous = ou.simEM()
oue = ou.exact(1, 1, np.random.normal(size=(5,10)))
ou.exact(0, np.linspace(0,10,10/0.1), 0)
ou.exactprocess(0,10)
print(ou.exactprocess(0,10, ddt=0.1,nrepl=10).mean(0))
#the following looks good, approaches mu
oues = ou.exactprocess(0,100, ddt=0.1,nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(oues.T)
tmp = plt.plot(oues.mean(0), linewidth=2)
plt.title('Ornstein-Uhlenbeck')
# SchwartsOne
# ^^^^^^^^^^^
so = SchwartzOne(xzero=0, mu=1, kappa=0.5, sigma=0.1)
sos = so.exactprocess(0,50, ddt=0.1,nrepl=100)
print(sos.mean(0))
print(np.log(sos.mean(0)))
doplot = 1
if doplot:
plt.figure()
tmp = plt.plot(sos.T)
tmp = plt.plot(sos.mean(0), linewidth=2)
plt.title('Schwartz One')
print(so.fitls(sos[0,:],dt=0.1))
sos2 = so.exactprocess(0,500, ddt=0.1,nrepl=5)
print('true: mu=1, kappa=0.5, sigma=0.1')
for i in range(5):
print(so.fitls(sos2[i],dt=0.1))
# Brownian Bridge
# ^^^^^^^^^^^^^^^
bb = BrownianBridge()
#bbs = bb.sample(x0, x1, nobs, nrepl=1, ddt=1., sigma=1.)
bbs, t, wm = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)
if doplot:
plt.figure()
tmp = plt.plot(bbs.T)
tmp = plt.plot(bbs.mean(0), linewidth=2)
plt.title('Brownian Bridge')
plt.figure()
plt.plot(wm,'r', label='theoretical')
plt.plot(bbs.std(0), label='simulated')
plt.title('Brownian Bridge - Variance')
plt.legend()
# Compound Poisson
# ^^^^^^^^^^^^^^^^
cp = CompoundPoisson([1,1], [np.random.normal,np.random.normal])
cps = cp.simulate(nobs=20000,nrepl=3)
print(cps[0].sum(-1).sum(-1))
print(cps[0].sum())
print(cps[0].mean(-1).mean(-1))
print(cps[0].mean())
print(cps[1].size)
print(cps[1].sum())
#Note Y = sum^{N} X is compound poisson of iid x, then
#E(Y) = E(N)*E(X) eg. eq. (6.37) page 385 in http://ee.stanford.edu/~gray/sp.html
#plt.show()
| bsd-3-clause |
neherlab/treetool | augur/src/mutation_tree.py | 1 | 16279 | import time, re, os, argparse,shutil, sys
from tree_refine import tree_refine
from virus_clean import virus_clean
from virus_filter import flu_filter
from date_util import numerical_date
from collections import defaultdict
from process import process, virus_config
from Bio import SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
import numpy as np
from itertools import izip
path_to_augur = './' + '/'.join(sys.argv[0].split('/')[:-2])
std_outgroup_file_blast = path_to_augur+'/source-data/outgroups.fasta'
std_outgroup_file_nuc = path_to_augur+'/source-data/outgroups_nucleotides_unspliced.fasta'
no_raxml_threshold = 15000
virus_config.update({
# data source and sequence parsing/cleaning/processing
'fasta_fields':{0:'strain', 1:'isolate_id', 2:'date', 3:'subtype', 4:'country', 5:'region', 7:'host', 6:'passage'},
    'cds':[0,None], # define the HA start in 0 numbering
'verbose':3
})
def get_date(strain):
from datetime import datetime
date_str = strain.split('|')[2]
try:
collection_date = datetime.strptime(date_str, '%Y-%m-%d')
return collection_date.strftime('%Y-%m-%d')
except:
collection_date = datetime.strptime(date_str[:4], '%Y')
return collection_date.strftime('%Y-%m-%d')
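# Sketch (the strain string is an assumption): the date is the third
# '|'-separated field; a bare year falls back to January 1st of that year.
#
#   get_date('A/HongKong/4801/2014|EPI_ISL_XXXX|2014-02-26|H3N2|...')
#   # -> '2014-02-26'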
class mutation_tree(process, flu_filter, tree_refine, virus_clean):
"""docstring for mutation_tree"""
def __init__(self, aln_fname, outgroup, include_ref_strains = True, outdir = './', formats = ['pdf','png'], verbose = 0, **kwargs):
process.__init__(self, **kwargs)
flu_filter.__init__(self, alignment_file = aln_fname, **kwargs)
tree_refine.__init__(self, **kwargs)
virus_clean.__init__(self, **kwargs)
self.midpoint_rooting = False
self.include_ref_strains = include_ref_strains
self.verbose = verbose
self.formats = formats
self.outdir = outdir.rstrip('/')+'/'
self.auspice_tree_fname = self.outdir + 'tree.json'
self.auspice_align_fname = self.outdir + 'aln.fasta'
self.auspice_aa_align_fname = self.outdir + 'aa_aln.fasta'
self.auspice_sequences_fname = self.outdir + 'sequences.json'
self.auspice_frequencies_fname = None
self.auspice_meta_fname = self.outdir + 'meta.json'
self.path_to_augur = path_to_augur
if os.path.isfile(outgroup):
            tmp = [{'strain':seq.name, 'seq':str(seq.seq).upper(), 'desc':seq.description}
for seq in SeqIO.parse(outgroup, 'fasta') ]
if len(tmp):
self.outgroup = tmp[0]
if len(tmp)>1:
print "More than one sequence in ", outgroup, "taking first"
if self.verbose:
print "using outgroup found in file ", outgroup
elif outgroup=='auto':
print "automatically determine outgroup"
self.auto_outgroup_blast()
elif isinstance(outgroup, basestring):
seq_names = [x['strain'] for x in self.viruses]
if outgroup in seq_names:
self.outgroup = self.viruses.pop(seq_names.index(outgroup))
if self.verbose:
print "using outgroup found in alignment", outgroup
else:
standard_outgroups = self.load_standard_outgroups()
if outgroup in standard_outgroups:
self.outgroup = standard_outgroups[outgroup]
if self.verbose:
print "using standard outgroup", outgroup
else:
raise ValueError("outgroup %s not found" % outgroup)
return
if "anno:" in self.outgroup['desc']:
anno = [x for x in self.outgroup['desc'].split() if "anno:" in x][0]
anno = (anno.split(':')[1]).split('_')
tmp = [(anno[2*i], int(anno[2*i+1])) for i in range(len(anno)/2)]
self.anno = sorted(tmp, key=lambda x:x[1])
print("Using annotation",self.anno)
else:
self.anno = None
print("No annotation found")
#self.anno = sorted((('SP',0), ('HA1',16), ('HA2',329+16)), key=lambda x:x[1])
self.viruses.append(self.outgroup)
self.filter_geo(prune=False)
self.make_strain_names_unique()
def load_standard_outgroups(self):
return {'|'.join(seq.description.split()[1].split('|')[:2]).replace(' ',''):
{'seq':str(seq.seq).upper(),
'strain':seq.description.split()[1].split('|')[1].replace(' ',''),
'desc':seq.description,
'date':get_date(seq.description)}
for seq in SeqIO.parse(std_outgroup_file_nuc, 'fasta')}
def auto_outgroup_blast(self):
from random import sample
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.Blast import NCBIXML
self.make_run_dir()
nvir = 10
max_ref_seqs = 5
tmp_dates = []
for v in self.viruses:
try:
tmp_dates.append(numerical_date(v["date"]))
except:
print("Can't parse date for",v['strain'], v['date'])
earliest_date = np.min(tmp_dates)
all_strains = [v["strain"] for v in self.viruses]
representatives = [SeqRecord(Seq(v['seq']), id=v['strain']) for v in sample(self.viruses, min(nvir, len(self.viruses)))]
standard_outgroups = self.load_standard_outgroups()
SeqIO.write(representatives, self.run_dir+'representatives.fasta', 'fasta')
blast_out = self.run_dir+"outgroup_blast.xml"
blast_cline = NcbiblastxCommandline(query=self.run_dir+"representatives.fasta", db=std_outgroup_file_blast, evalue=0.01,
outfmt=5, out=blast_out)
stdout, stderr = blast_cline()
with open(blast_out, 'r') as bfile:
og_blast = NCBIXML.parse(bfile)
by_og = defaultdict(list)
for rep in og_blast:
for hit in rep.alignments:
for aln in hit.hsps:
by_og[hit.hit_def].append((rep.query, aln.score, aln.score/aln.align_length, 1.0*aln.identities/aln.align_length))
by_og = by_og.items()
print by_og[1]
# sort by number of hits, then mean score
by_og.sort(key = lambda x:(len(x[1]), np.mean([y[1] for y in x[1]])), reverse=True)
outgroups_older_than_sample = [(og, hits) for (og, hits) in by_og
if (numerical_date(standard_outgroups[og]['date'])<earliest_date-5) or
('A/California/07/2009' in standard_outgroups[og]['strain'])]
if len(outgroups_older_than_sample) and np.mean([y[-1] for y in outgroups_older_than_sample[0][1]])>0.8:
outgroup = outgroups_older_than_sample[0][0]
else:
outgroup = by_og[0][0]
self.midpoint_rooting = True
print("will root at midpoint")
for oi, (ref, hits) in enumerate(by_og):
if (np.max([y[-1] for y in hits])>0.9+oi*0.02) and ref!=outgroup:
self.viruses.append(standard_outgroups[ref])
print("including reference strain ",ref, [y[-1] for y in hits])
if oi>max_ref_seqs:
break
self.outgroup = standard_outgroups[outgroup]
if 'A/California/07/2009' not in self.outgroup['strain']:
self.outgroup['strain']+='OG'
prot = Seq(self.outgroup['seq']).translate(to_stop=True)
self.cds = [0,min(len(prot)*3,len(self.outgroup['seq']))]
print("chosen outgroup",self.outgroup['strain'])
def refine(self):
self.node_lookup = {node.taxon.label:node for node in self.tree.leaf_iter()}
self.unique_date()
if 'A/California/07/2009' not in self.outgroup['strain']:
self.remove_outgroup()
self.ladderize()
self.collapse()
self.add_nuc_mutations()
self.add_node_attributes()
if self.cds is not None:
self.translate_all()
self.add_aa_mutations()
if self.anno is not None:
divides = np.array([x[1] for x in self.anno])
for node in self.tree.postorder_node_iter():
node.alt_aa_muts = ""
tmp = defaultdict(list)
if len(node.aa_muts):
for mut in node.aa_muts.split(','):
anc,pos,der = mut[0], int(mut[1:-1]), mut[-1]
ii = divides.searchsorted(pos)-1
if ii>0:
tmp[ii].append(anc+str(pos-divides[ii])+der)
for ii, anno in enumerate(self.anno):
if len(tmp[ii]):
node.alt_aa_muts+=anno[0]+': '+','.join(tmp[ii])+" "
self.layout()
for v in self.viruses:
if v.strain in self.node_lookup:
node = self.node_lookup[v.strain]
for attr in ['strain', 'desc']:
try:
node.__setattr__(attr, v.__getattribute__(attr))
except:
pass
# make an amino acid aligment
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
if self.cds is not None:
tmp_aaseqs = [SeqRecord(Seq(node.aa_seq), id=node.strain, annotations = {'num_date':node.num_date, 'region':node.region}) for node in self.tree.leaf_iter()]
tmp_aaseqs.sort(key = lambda x:x.annotations['num_date'])
self.aa_aln = MultipleSeqAlignment(tmp_aaseqs)
tmp_nucseqs = [SeqRecord(Seq(node.seq), id=node.strain, annotations = {'num_date':node.num_date, 'region':node.region}) for node in self.tree.leaf_iter()]
tmp_nucseqs.sort(key = lambda x:x.annotations['num_date'])
self.nuc_aln = MultipleSeqAlignment(tmp_nucseqs)
def export(self):
from bio_draw import muttree_draw
def select_fontsize(n):
if n<10:
return 12
elif n<50:
return 10
else:
return 8
def branch_label_func(n):
max_muts = 5
if hasattr(n,'aa_muts'):
if alt:
muts = n.alt_aa_muts
else:
muts = n.aa_muts
else:
print(n,"has no amino acid mutations")
try:
muts = n.nuc_muts
except:
print(n,"has no nucleotide mutations")
muts = ""
tmp = muts.split(',')
if len(tmp)>max_muts:
return ', '.join(tmp[:max_muts])+' + '+str(len(tmp)-max_muts)+' others'
else:
return ', '.join(tmp)
from Bio import Phylo
import matplotlib
matplotlib.use('cairo')
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size':select_fontsize(len(self.viruses))})
plt.ioff()
from tree_util import to_Biopython
tmp_tree = to_Biopython(self.tree)
tmp_tree.ladderize()
for alt in [False] if self.anno is None else [False, True]:
fig = plt.figure('Tree', figsize = (15,2+len(self.viruses)/5))
ax = plt.subplot('111')
muttree_draw(tmp_tree, axes=ax, show_confidence=False, do_show=False,
label_func = lambda x: x.name,
branch_labels = branch_label_func
)
ax.invert_yaxis()
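# Draw a scale bar (half of one x-tick interval) below the tree and label it with its length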
tl = np.diff(ax.get_xticks())[0]
lengthbar = tl/2
plt.plot( [0,lengthbar],[len(self.viruses),len(self.viruses)], lw=10, c='k')
plt.text(lengthbar/2, len(self.viruses)+0.1, str(lengthbar),horizontalalignment='center',fontsize=16)
ax.set_axis_off()
for fmt in self.formats:
if alt:
plt.savefig(self.outdir+'tree_alt.'+fmt)
else:
plt.savefig(self.outdir+'tree.'+fmt)
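# For the newick files, temporarily replace node names with strain + mutation labels; the original names are restored after writing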
for t in tmp_tree.find_clades():
t.label = t.name # save original name
if hasattr(t,"strain"):
t.name = t.strain
else:
t.name = ""
if alt:
muts = t.alt_aa_muts if hasattr(t,'alt_aa_muts') else t.nuc_muts
else:
muts = t.aa_muts if hasattr(t,'aa_muts') else t.nuc_muts
if len(t.name) and len(muts): t.name+='-'
t.name+='_'.join(muts.split(',')).replace(' ','')
if alt:
Phylo.write(tmp_tree, self.outdir+'tree_alt.nwk', 'newick')
else:
Phylo.write(tmp_tree, self.outdir+'tree.nwk', 'newick')
for t in tmp_tree.find_clades(): # revert to original name
t.name = t.label
plt.close('Tree')
for n in self.tree.leaf_iter():
for field in self.fasta_fields.values():
if (not hasattr(n, field)) or n.__dict__[field]=="":
n.__dict__[field]="Unknown"
for n in self.tree.postorder_internal_node_iter():
for field in self.fasta_fields.values():
n.__dict__[field]="Unknown"
if self.cds is None:
self.export_to_auspice(tree_fields = ['nuc_muts','num_date']+self.fasta_fields.values(), seq='nuc')
else:
self.export_to_auspice(tree_fields = ['aa_muts','alt_aa_muts','num_date']+self.fasta_fields.values())
def make_strain_names_unique(self):
strain_to_seq = defaultdict(list)
for v in self.viruses:
strain_to_seq[v['strain'].upper()].append(v)
for strain, strain_list in strain_to_seq.iteritems():
if len(strain_list)>1:
for ii, virus in enumerate(strain_list):
virus['strain']+='-'+str(ii+1)
def run(self, raxml_time_limit):
rax_tlimit = raxml_time_limit
self.align()
for v in self.viruses:
v.description=''
AlignIO.write(self.viruses, self.auspice_align_fname, 'fasta')
self.remove_insertions()
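# For very large data sets (more than no_raxml_threshold sequences), drop the RAxML time budget to 0 so no extra time is spent on ML refinement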
if len(self.viruses)>no_raxml_threshold:
rax_tlimit = 0
print "--- Tree infer at " + time.strftime("%H:%M:%S") + " ---"
self.infer_tree(rax_tlimit)
print "--- Infer ancestral sequences " + time.strftime("%H:%M:%S") + " ---"
self.infer_ancestral() # -> every node has a sequence
print "--- Tree refine at " + time.strftime("%H:%M:%S") + " ---"
self.refine()
if self.cds:
aa_aln = MultipleSeqAlignment([])
for node in self.tree.leaf_iter():
aa_aln.append(SeqRecord(id=node.strain, seq=Seq(node.aa_seq), description=''))
AlignIO.write(aa_aln, self.auspice_aa_align_fname, 'fasta')
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Build a tree given a fasta file and annotate branches with mutations')
parser.add_argument('--aln', required = True, type = str, help ="fasta file with input sequences")
parser.add_argument('--outgroup', required = True, type = str, help ="outgroup to root the tree, strain label or fasta file")
parser.add_argument('--cds', nargs = '+', type = int, default = None, help='part of the outgroup sequence that is to be translated')
parser.add_argument('--out', type = str, default = 'output/', help='output directory')
parser.add_argument('--nthreads', type = int, default=1, help ="number of threads to use (mafft and raxml)")
parser.add_argument('--mplconfigdir', type = str, default="/tmp/", help ="writable directory for the matplotlib configuration")
params = parser.parse_args()
# set the matplotlib configuration path; it needs to be writable and set before the matplotlib import (in .export)
os.environ['MPLCONFIGDIR'] = params.mplconfigdir
# check and parse cds
if params.cds is None:
virus_config['cds']=None
else:
if len(params.cds)==2:
virus_config['cds']=params.cds
elif len(params.cds)==1:
virus_config['cds']=(params.cds[0], None)
else:
raise ValueError("Expecting a cds of length 1 (start only) or 2, got "+str(params.cds))
exit()
debug_fasta_dir = path_to_augur+'/broken_fasta'
if not os.path.isdir(debug_fasta_dir):
os.makedirs(debug_fasta_dir)
tmp_fasta_fname = debug_fasta_dir+'/'+'_'.join(['aln', time.strftime('%Y%m%d-%H%M%S',time.gmtime()),
str(np.random.randint(0,1000000))])+'.fasta'
shutil.copy2(params.aln, tmp_fasta_fname)
# check and create output directory
if not os.path.isdir(params.out):
try:
os.makedirs(params.out)
os.makedirs(params.out+'/js')
os.makedirs(params.out+'/css')
except OSError as e:
print "Cannot create output directory",e
if not os.path.isdir(params.out+'/js'):
try:
os.makedirs(params.out+'/js')
except OSError as e:
print "Cannot create output directory",e
if not os.path.isdir(params.out+'/css'):
try:
os.makedirs(params.out+'/css')
except OSError as e:
print "Cannot create output directory",e
shutil.copy2(params.aln, params.out+'/input_sequences.fasta')
shutil.copy2(path_to_augur + '/../auspice/error.html', params.out+'/index.html')
shutil.copy2(path_to_augur + '/../auspice/js/muttree.js', params.out+'/js/muttree.js')
shutil.copy2(path_to_augur + '/../auspice/js/msa.min.js', params.out+'/js/msa.min.js')
shutil.copy2(path_to_augur + '/../auspice/js/d3.min.js', params.out+'/js/d3.min.js')
shutil.copy2(path_to_augur + '/../auspice/js/d3.tip.js', params.out+'/js/d3.tip.js')
shutil.copy2(path_to_augur + '/../auspice/js/FileSaver.js', params.out+'/js/FileSaver.js')
shutil.copy2(path_to_augur + '/../auspice/js/autocomplete.js', params.out+'/js/autocomplete.js')
shutil.copy2(path_to_augur + '/../auspice/css/style.css', params.out+'/css/style.css')
virus_config["outdir"]=params.out
virus_config["nthreads"]=params.nthreads
try:
muttree = mutation_tree(params.aln, params.outgroup, **virus_config)
muttree.run(raxml_time_limit=0.1)
muttree.export()
with open(muttree.outdir+'/js/fields.js', 'w') as ofile:
for field in ['passage', 'host', 'subtype','region']:
try:
tmp = sorted(set([x.__dict__[field] for x in muttree.tree.leaf_iter()]))
except:
tmp = ["Unknown"]
if "Unknown" not in tmp: tmp.append("Unknown")
ofile.write(field + 's = [' + ', '.join(map(lambda x:'"'+str(x)+'"',tmp))+']\n')
shutil.copy2(path_to_augur + '/../auspice/index.html', muttree.outdir+'index.html')
os.remove(tmp_fasta_fname)
except:
print("treetool run failed")
| mit |
Mirantis/launchpad-reports-summary | launchpad_reporting/criterias.py | 1 | 4951 | import datetime
from pandas.tseries import offsets
from launchpad_reporting.db.util import process_date
CRITICAL_IMPORTANCE = "Critical"
HIGH_IMPORTANCE = "High"
CONFIRMED_STATUS = "Confirmed"
IN_PROGRESS_STATUS = "In Progress"
TRIAGED_STATUS = "Triaged"
CUSTOMER_FOUND_TAG = "customer-found"
def business_days_ago(days):
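# "now" minus the given number of business days (pandas BDay skips weekends), normalised via process_date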
timedelta = datetime.datetime.now() - offsets.BDay(days)
return process_date(timedelta.to_datetime())
class BugCriteria(object):
def get_hint_text(self, bug, template):
is_customer_found = CUSTOMER_FOUND_TAG in bug.tags
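# Resolve the threshold used in the hint text: an explicit self.threshold takes precedence; otherwise look up
# an attribute such as "critical_customer_found_threshold", falling back to the untagged variant if it is missing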
if getattr(self, 'threshold', None):
threshold = self.threshold
else:
if bug.importance in [CRITICAL_IMPORTANCE, HIGH_IMPORTANCE]:
threshold_template = bug.importance.lower()
else:
threshold_template = "others"
if is_customer_found:
threshold_template += "_customer_found"
threshold_template += "_threshold"
try:
threshold = getattr(self, threshold_template)
except AttributeError:
if is_customer_found:
threshold = getattr(
self,
threshold_template.replace("_customer_found", "")
)
hint_data = {
"importance": bug.importance,
"with_customer_found": "with `customer-found` tag" if is_customer_found else "",
"threshold": threshold,
}
return template.format(**hint_data)
class NonTriaged(BugCriteria):
"""Implementation for `sla-non-triaged` criteria"""
def __init__(self, threshold):
self.threshold = threshold
def is_satisfied(self, bug):
return (process_date(bug.date_created) < business_days_ago(self.threshold)
and (bug.milestone is None or bug.importance is None
or bug.assignee is None)
)
class SLAFullLifecycle(BugCriteria):
"""Implementation for `sla-full-lifecycle` criteria"""
def __init__(self, critical_threshold, high_threshold):
self.critical_threshold = critical_threshold
self.high_threshold = high_threshold
def is_satisfied(self, bug):
if bug.importance == HIGH_IMPORTANCE:
return process_date(bug.date_created) < business_days_ago(self.high_threshold)
if bug.importance == CRITICAL_IMPORTANCE:
return (process_date(bug.date_created) <
business_days_ago(self.critical_threshold))
else:
# False for all other bugs
return False
class SLAConfirmedTriaged(BugCriteria):
"""Implementation for `sla-confirmed-triaged` criteria"""
def __init__(self, threshold):
self.threshold = threshold
def is_satisfied(self, bug):
if bug.status in [CONFIRMED_STATUS, TRIAGED_STATUS]:
return process_date(bug.date_last_updated) < business_days_ago(self.threshold)
return False
class SLAInProgress(BugCriteria):
"""Implementation for `sla-in-progress` criteria"""
def __init__(self, critical_customer_found_threshold, critical_threshold,
high_customer_found_threshold, high_threshold,
others_threshold):
self.critical_customer_found_threshold = critical_customer_found_threshold
self.critical_threshold = critical_threshold
self.high_customer_found_threshold = high_customer_found_threshold
self.high_threshold = high_threshold
self.others_threshold = others_threshold
def is_satisfied(self, bug):
if bug.status != IN_PROGRESS_STATUS:
return False
if bug.importance == CRITICAL_IMPORTANCE and CUSTOMER_FOUND_TAG in bug.tags:
return process_date(bug.date_in_progress) < business_days_ago(self.critical_customer_found_threshold)
if bug.importance == CRITICAL_IMPORTANCE:
return process_date(bug.date_in_progress) < business_days_ago(self.critical_threshold)
if bug.importance == HIGH_IMPORTANCE and CUSTOMER_FOUND_TAG in bug.tags:
return process_date(bug.date_in_progress) < business_days_ago(self.high_customer_found_threshold)
if bug.importance == HIGH_IMPORTANCE:
return process_date(bug.date_in_progress) < business_days_ago(self.high_threshold)
return process_date(bug.date_in_progress) < business_days_ago(self.others_threshold)
class HCFReport(BugCriteria):
"""Implementation for `hcf-report` criteria"""
def __init__(self, exclude_tags):
self.exclude_tags = exclude_tags
def is_satisfied(self, bug):
if bool(set(self.exclude_tags) & set(bug.tags)):
return False
return True
class All(BugCriteria):
"""Implementation for `all` criteria"""
def __init__(self):
pass
def is_satisfied(self, bug):
return True
| mit |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_except.py | 15 | 1043 | #!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(unittest.TestCase):
def test_raise_on_find_unsupported_value(self):
import datetime
self.assertRaises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalidvalue(self):
self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
| artistic-2.0 |
jseabold/scipy | scipy/stats/kde.py | 18 | 17306 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=np.float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| bsd-3-clause |
wlamond/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 43 | 28175 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
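# Every base estimator should have been fitted on the sparse matrix type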
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert_true(isinstance(estimator[0].steps[-1][1].random_state,
int))
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_)
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert_equal(len(estimators_samples), len(estimators))
assert_equal(len(estimators_samples[0]), len(X))
assert_equal(estimators_samples[0].dtype.kind, 'b')
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert_equal(bagging._max_samples, max_samples)
| bsd-3-clause |
sanjayankur31/nest-simulator | pynest/examples/sinusoidal_gamma_generator.py | 8 | 11885 | # -*- coding: utf-8 -*-
#
# sinusoidal_gamma_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
Sinusoidal gamma generator example
----------------------------------
This script demonstrates the use of the ``sinusoidal_gamma_generator`` and its
different parameters and modes. The source code of the model can be found in
``models/sinusoidal_gamma_generator.h``.
The script is structured into two parts, each of which generates its own
figure. In part 1A, two generators are created with different orders of the
underlying gamma process and their resulting PST (peristimulus time) and ISI
(Inter-spike interval) histograms are plotted. Part 1B illustrates the effect
of the ``individual_spike_trains`` switch. In Part 2, the effects of
different settings for rate, phase and frequency are demonstrated.
"""
###############################################################################
# First, we import all necessary modules to simulate, analyze and
# plot this example.
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
###############################################################################
# We first create a figure for the plot and set the resolution of NEST.
plt.figure()
nest.SetKernelStatus({'resolution': 0.01})
###############################################################################
# Then we create two instances of the ``sinusoidal_gamma_generator`` with two
# different orders of the underlying gamma process using ``Create``. Moreover,
# we create devices to record firing rates (``multimeter``) and spikes
# (``spike_recorder``) and connect them to the generators using ``Connect``.
num_nodes = 2
g = nest.Create('sinusoidal_gamma_generator', n=num_nodes,
params={'rate': 10000.0,
'amplitude': 5000.0,
'frequency': 10.0,
'phase': 0.0,
'order': [2.0, 10.0]}) # note the syntax for different order parameter of the two nodes
m = nest.Create('multimeter', num_nodes, {'interval': 0.1, 'record_from': ['rate']})
s = nest.Create('spike_recorder', num_nodes)
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
nest.Simulate(200)
###############################################################################
# After simulating, the spikes are extracted from the ``spike_recorder`` and
# plots are created with panels for the PST and ISI histograms.
colors = ['b', 'g']
for j in range(num_nodes):
ev = m[j].events
t = ev['times']
r = ev['rate']
spike_times = s[j].events['times']
plt.subplot(221)
h, e = np.histogram(spike_times, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(spike_times), bins=np.arange(0., 0.505, 0.01),
histtype='step', color=colors[j])
plt.title('ISI histogram')
###############################################################################
# The kernel is reset and the number of threads set to 4.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
###############################################################################
# First, a ``sinusoidal_gamma_generator`` with ``individual_spike_trains`` set to
# `True` is created and connected to 20 parrot neurons whose spikes are
# recorded by a spike recorder. After simulating, a raster plot of the spikes
# is created.
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = s.events
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
#################################################################################
# The kernel is reset again and the whole procedure is repeated for a
# ``sinusoidal_gamma_generator`` with ``individual_spike_trains`` set to `False`.
# The plot shows that in this case, all neurons receive the same spike train
# from the ``sinusoidal_gamma_generator``.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = s.events
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
###############################################################################
# In part 2, multiple generators are created with different settings for rate,
# phase and frequency. First, we define an auxiliary function, which simulates
# `n` generators for `t` ms. After `t/2`, the parameter dictionary of the
# generators is changed from initial to after.
def step(t, n, initial, after, seed=1, dt=0.05):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt, "rng_seed": seed})
g = nest.Create('sinusoidal_gamma_generator', n, params=initial)
sr = nest.Create('spike_recorder')
nest.Connect(g, sr)
nest.Simulate(t / 2)
g.set(after)
nest.Simulate(t / 2)
return sr.events
###############################################################################
# This function serves to plot a histogram of the emitted spikes.
def plot_hist(spikes):
plt.hist(spikes['times'],
bins=np.arange(0., max(spikes['times']) + 1.5, 1.),
histtype='step')
t = 1000
n = 1000
dt = 1.0
steps = int(t / dt)
offset = t / 1000. * 2 * np.pi
# We create a figure with a 2x3 grid.
grid = (2, 3)
fig = plt.figure(figsize=(15, 10))
###############################################################################
# We simulate a ``sinusoidal_gamma_generator`` with default parameter values,
# i.e. ``ac=0`` and the DC value being changed from 20 to 50 after `t/2` and
# plot the number of spikes per second over time.
plt.subplot(grid[0], grid[1], 1)
spikes = step(t, n,
{'rate': 20.0},
{'rate': 50.0, },
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(int(steps))
exp[:int(steps / 2)] *= 20
exp[int(steps / 2):] *= 50
plt.plot(exp, 'r')
plt.title('DC rate: 20 -> 50')
plt.ylabel('Spikes per second')
###############################################################################
# We simulate a ``sinusoidal_gamma_generator`` with the DC value being changed
# from 80 to 40 after `t/2` and plot the number of spikes per second over
# time.
plt.subplot(grid[0], grid[1], 2)
spikes = step(t, n,
{'order': 6.0, 'rate': 80.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
{'order': 6.0, 'rate': 40.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(int(steps))
exp[:int(steps / 2)] *= 80
exp[int(steps / 2):] *= 40
plt.plot(exp, 'r')
plt.title('DC rate: 80 -> 40')
###############################################################################
# Next, we simulate a ``sinusoidal_gamma_generator`` with the AC value being
# changed from 40 to 20 after `t/2` and plot the number of spikes per
# second over time.
plt.subplot(grid[0], grid[1], 3)
spikes = step(t, n,
{'order': 3.0, 'rate': 40.0, 'amplitude': 40.,
'frequency': 10., 'phase': 0.},
{'order': 3.0, 'rate': 40.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (40. + 40. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (40. + 20. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 40 -> 20')
##################################################################################
# Finally, we simulate a ``sinusoidal_gamma_generator`` with a non-zero AC value
# and the DC value being changed from 80 to 40 after `t/2` and plot the
# number of spikes per second over time.
plt.subplot(grid[0], grid[1], 4)
spikes = step(t, n,
{'order': 6.0, 'rate': 20.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 50.0, 'amplitude': 50.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (20. + 20. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (50. + 50. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('DC Rate and Rate Modulation: 20 -> 50')
plt.ylabel('Spikes per second')
plt.xlabel('Time [ms]')
###############################################################################
# Simulate a ``sinusoidal_gamma_generator`` with the AC value being
# changed from 0 to 40 after `t/2` and plot the number of spikes per
# second over time.
plt.subplot(grid[0], grid[1], 5)
spikes = step(t, n,
{'rate': 40.0, },
{'amplitude': 40.0, 'frequency': 20.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = 40. * np.ones(int(steps / 2))
exp[int(steps / 2):] = (40. + 40. * np.sin(np.arange(
0, t / 1000. * np.pi * 20, t / 1000. * np.pi * 20. / (steps / 2))))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 0 -> 40')
plt.xlabel('Time [ms]')
###############################################################################
# Simulate a ``sinusoidal_gamma_generator`` with a phase shift at
# `t/2` and plot the number of spikes per second over time.
# Phase shift
plt.subplot(grid[0], grid[1], 6)
spikes = step(t, n,
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 180.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (60. + 60. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (60. + 60. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset + np.pi))
plt.plot(exp, 'r')
plt.title('Modulation Phase: 0 -> Pi')
plt.xlabel('Time [ms]')
plt.show()
| gpl-2.0 |
pyramania/scipy | scipy/stats/stats.py | 2 | 179322 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma, zeros
from scipy._lib.six import callable, string_types
from scipy._lib._version import NumpyVersion
import scipy.special as special
import scipy.linalg as linalg
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_dis
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
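Examples
--------
A minimal sketch of typical usage; ``np.allclose`` is used instead of
printing the raw value because the last digits are platform dependent:
>>> import numpy as np
>>> from scipy import stats
>>> np.allclose(stats.gmean([1, 4, 16]), 4.0)   # cube root of 1 * 4 * 16
True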
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
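Examples
--------
A small sketch with an exactly representable result:
>>> from scipy import stats
>>> stats.hmean([1, 4, 4])   # 3 / (1/1 + 1/4 + 1/4)
2.0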
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0):
# Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
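Examples
--------
A brief sketch of how the limits and inclusive flags interact; values
outside the closed interval [1, 3] are masked out:
>>> import numpy as np
>>> _mask_to_limits(np.arange(5), (1, 3), (True, True)).compressed()
array([1, 2, 3])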
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
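Examples
--------
A small illustration: the first central moment is zero by definition and
the second is the biased sample variance:
>>> from scipy import stats
>>> stats.moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> stats.moment([1, 2, 3, 4, 5], moment=2)
2.0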
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
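Examples
--------
A small sketch in which both the standard deviation and the mean are exact:
>>> from scipy import stats
>>> stats.variation([1, 1, 3, 3])   # std = 1.0, mean = 2.0
0.5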
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
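Examples
--------
A brief illustration: a perfectly symmetric sample has zero skewness:
>>> from scipy import stats
>>> stats.skew([1, 2, 3, 4, 5])
0.0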
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
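Examples
--------
For the small symmetric sample below the moments are exact, so the two
definitions differ by exactly 3:
>>> from scipy import stats
>>> stats.kurtosis([1, 2, 3, 4, 5])                # Fisher (normal ==> 0.0)
-1.3
>>> stats.kurtosis([1, 2, 3, 4, 5], fisher=False)  # Pearson (normal ==> 3.0)
1.7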
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
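Examples
--------
A rough sketch on a symmetric sample of the minimum allowed size; only the
p-value is inspected, since the exact statistic depends on floating point
details:
>>> from scipy import stats
>>> stat, p = stats.skewtest([1, 2, 3, 4, 5, 6, 7, 8])
>>> p > 0.05
True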
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n >= 20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
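Examples
--------
A rough sanity check on 20 evenly spaced values (the smallest sample size
for which the test is considered valid); only the p-value is inspected:
>>> from scipy import stats
>>> stat, p = stats.kurtosistest(list(range(20)))
>>> p > 0.05
True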
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
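Examples
--------
A small sketch combining the two component tests; with only 20 evenly
spaced values there is not enough evidence to reject normality:
>>> from scipy import stats
>>> stat, p = stats.normaltest(list(range(20)))
>>> p > 0.05
True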
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test checks whether the sample data have the skewness and
kurtosis of a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
Economics Letters, 6, 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation_method`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all of the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Users with NumPy >= 1.9 are therefore encouraged to use `numpy.percentile`
instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range ``x`` is given by
``bins[x] <= range_x < bins[x+1]`` for ``x = 0, ..., N-2``, where ``N`` is
the length of the `bins` array. The last range is given by
``bins[N-1] <= range < infinity``. Values less than ``bins[0]`` are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total number of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
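Examples
--------
Since this function is deprecated, a minimal sketch of the equivalent
computation (mean divided by standard deviation) is shown instead:
>>> a = np.array([1., 2., 3., 4.])
>>> a.mean() / a.std()
2.2360679...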
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
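# When reducing along an axis of a multi-dimensional input, re-expand the
# mean and std so that they broadcast against `a`.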
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : two-element sequence containing floats in range [0, 100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
# At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
# np.nanpercentile exists, i.e. at time of writing np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
# For non-scalar results, nanpercentile in numpy < 1.11.0 does not do the proper axis roll.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
# If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `c`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
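# Iterate until a full pass removes no further elements (delta == 0).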
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
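# int() truncation means a non-integer cut point trims conservatively
# (slightly fewer points than `proportiontocut` would suggest).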
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
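Examples
--------
A minimal sketch, trimming the lowest 25% of a small illustrative array:
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.25, tail='left')
>>> b.shape
(15,)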
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman [2], pp. 394-397.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, J. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
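# The two-sided p-value comes from mapping r onto a t statistic with
# n - 2 degrees of freedom and evaluating the tail via the regularized
# incomplete beta function.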
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
This is prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
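# `mode` is the most probable value of c[0,0] under the null
# (hypergeometric) distribution; the two-sided p-value sums the
# probabilities of all tables no more likely than the observed one.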
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
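# Spearman's rho is the Pearson correlation coefficient computed on the
# rank-transformed data.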
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that if the input contains nan
'omit' delegates to mstats_basic.kendalltau(), which has a different
implementation.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
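# For each group of ties of size t, accumulate sum(t*(t-1)/2),
# sum(t*(t-1)*(t-2)) and sum(t*(t-1)*(2*t+5)); these enter the tau-b
# normalization and the variance of con_minus_dis.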
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
# Limit range to fix computational errors
return KendalltauResult(min(1., max(-1., tau)), pvalue)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis; if array_like then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
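# One-sample t statistic: (sample mean - popmean) / (s / sqrt(n)),
# with s the ddof=1 sample standard deviation.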
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
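# Welch's t-test denominator: degrees of freedom are approximated with
# the Welch-Satterthwaite equation.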
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
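# Standard (pooled-variance) denominator for the equal-variance t-test.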
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
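Examples
--------
A minimal sketch using hypothetical summary statistics; the standard
deviations are assumed to be ddof=1 sample values so that the result
matches `ttest_ind` on the underlying data:
>>> from scipy import stats
>>> t, p = stats.ttest_ind_from_stats(mean1=15.0, std1=4.0, nobs1=30,
...                                    mean2=13.5, std2=5.0, nobs2=30)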
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
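# general form: 2 / (lambda_*(lambda_ + 1)) * sum(f_obs * ((f_obs/f_exp)**lambda_ - 1)),
# which reduces to the Pearson (lambda_=1), G-test (lambda_=0) and modified
# log-likelihood (lambda_=-1) statistics handled by the branches above.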
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d is the maximum absolute (not signed) distance between the two ECDFs
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
except Exception:
prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
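# tie correction factor: 1 - sum(t_i**3 - t_i) / (n**3 - n), where t_i is the
# size of the i-th group of tied ranks and n is the total number of ranks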
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
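# A minimal usage sketch for `mannwhitneyu` (illustrative only; the sample
# sizes and distributions below are hypothetical, not part of the library):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(12345678)
# >>> x = stats.norm.rvs(loc=0.0, scale=1.0, size=50)
# >>> y = stats.norm.rvs(loc=0.5, scale=1.0, size=50)
# >>> u, p = stats.mannwhitneyu(x, y, alternative='two-sided')
#
# A small p-value suggests the two samples come from distributions with
# different locations; passing `alternative=None` (the deprecated default)
# returns min(U_x, U_y) and a one-sided p-value, as described above.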
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
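# A minimal usage sketch for `ranksums` (illustrative; the sample data are
# hypothetical):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(0)
# >>> sample1 = np.random.normal(loc=0.0, scale=1.0, size=30)
# >>> sample2 = np.random.normal(loc=1.0, scale=1.0, size=30)
# >>> z, p = stats.ranksums(sample1, sample2)
#
# `z` is the large-sample normal approximation of the rank-sum statistic and
# `p` its two-sided p-value; unlike `mannwhitneyu`, ties are not corrected for.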
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
# mask invalid values in the arguments themselves; rebinding the loop
# variable alone would leave `args` unchanged
args = [ma.masked_invalid(arg) for arg in args]
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
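# Friedman statistic with tie correction:
# chi2 = [12 / (n*k*(k+1)) * sum_j R_j**2 - 3*n*(k+1)] / c,
# where R_j is the rank sum of treatment j and c is the correction computed above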
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
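# A minimal usage sketch for `friedmanchisquare` (illustrative; the three
# hypothetical arrays stand for three repeated measurements on the same
# twelve subjects):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(1)
# >>> baseline = np.random.normal(size=12)
# >>> m1, m2, m3 = baseline, baseline + 0.1, baseline + 0.3
# >>> stat, p = stats.friedmanchisquare(m1, m2, m3)
#
# With only 3 repeated measurements the chi-squared approximation is rough,
# as the Notes above caution (n > 10 and more than 6 measurements are advised).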
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
"Invalid method '%s'. Options are 'fisher' or 'stouffer'", method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
Degrees of freedom of the Restricted model.
dfden : int
Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
sorted_array : ndarray
A sorted copy of `a`.
indices : ndarray of type int
The indices (argsort) that sort the original array.
"""
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
aabadie/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/patches.py | 10 | 142681 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or
facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_fill(fill)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if six.callable(self._contains):
return self._contains(self, mouseevent)
if radius is None:
radius = self.get_linewidth()
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
if radius is None:
radius = self.get_linewidth()
return self.get_path().contains_point(point,
self.get_transform(),
radius)
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
self.set_edgecolor(other.get_edgecolor())
self.set_facecolor(other.get_facecolor())
self.set_fill(other.get_fill())
self.set_hatch(other.get_hatch())
self.set_linewidth(other.get_linewidth())
self.set_linestyle(other.get_linestyle())
self.set_transform(other.get_data_transform())
self.set_figure(other.get_figure())
self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.edgecolor']
self._original_edgecolor = color
self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.facecolor']
self._original_facecolor = color # save: otherwise changing _fill
# may lose alpha information
self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)
if not self._fill:
self._facecolor = list(self._facecolor)
self._facecolor[3] = 0
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha tranparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self.set_facecolor(self._original_facecolor) # using self._fill and
# self._alpha
self.set_edgecolor(self._original_edgecolor)
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
self._linewidth = w
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
"""
if ls is None:
ls = "solid"
self._linestyle = ls
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self.set_facecolor(self._original_facecolor)
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
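# Usage sketch (illustrative, assuming matplotlib.pyplot is importable):
# setting common Patch properties such as color, hatch and line style.
#
#     import matplotlib.pyplot as plt
#     from matplotlib.patches import Rectangle
#     fig, ax = plt.subplots()
#     p = Rectangle((0.1, 0.1), 0.5, 0.3)
#     p.set_color('lightsteelblue')   # sets both face and edge color
#     p.set_hatch('//')               # denser diagonal hatching
#     p.set_linestyle('dashed')
#     ax.add_patch(p)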
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.colorConverter.to_rgba(
self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
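# Usage sketch (illustrative): adding a darkened Shadow behind another patch,
# offset by (ox, oy) in points; assumes an existing Axes `ax`.
#
#     from matplotlib.patches import Circle, Shadow
#     c = Circle((0.5, 0.5), 0.2, facecolor='gold')
#     s = Shadow(c, ox=4, oy=-4)   # darkened copy of the circle's face color
#     ax.add_patch(s)
#     ax.add_patch(c)              # draw the circle on top of its shadow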
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
self._angle = angle
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
Return the vertices of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def contains(self, mouseevent):
# special case the degenerate rectangle
if self._width == 0 or self._height == 0:
return False, {}
x, y = self.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:  # a single (l, b, w, h) sequence was passed
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
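# Usage sketch (illustrative): a rotated, filled Rectangle added to an Axes.
#
#     import matplotlib.pyplot as plt
#     from matplotlib.patches import Rectangle
#     fig, ax = plt.subplots()
#     r = Rectangle((0.2, 0.2), width=0.5, height=0.3, angle=30.0,
#                   facecolor='salmon', edgecolor='black')
#     ax.add_patch(r)
#     r.set_bounds(0.1, 0.1, 0.6, 0.4)   # reposition/resize after creation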
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
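# Usage sketch (illustrative): a hexagon centered at (0.5, 0.5); *radius* is
# in data units and *orientation* in radians.
#
#     import numpy as np
#     from matplotlib.patches import RegularPolygon
#     hexagon = RegularPolygon((0.5, 0.5), numVertices=6, radius=0.2,
#                              orientation=np.pi / 6, facecolor='none',
#                              edgecolor='green')
#     ax.add_patch(hexagon)   # assumes an existing Axes `ax`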
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
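# Usage sketch (illustrative): wrapping an arbitrary Path in a PathPatch.
#
#     from matplotlib.path import Path
#     from matplotlib.patches import PathPatch
#     verts = [(0., 0.), (0.5, 1.), (1., 0.), (0., 0.)]
#     codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
#     patch = PathPatch(Path(verts, codes), facecolor='orange', lw=2)
#     ax.add_patch(patch)   # assumes an existing Axes `ax`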
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
        Return whether the polygon is closed
Returns
-------
closed : bool
If the path is closed
"""
return self._closed
def set_closed(self, closed):
"""
        Set whether the polygon is closed
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
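# Usage sketch (illustrative): a closed triangle from an Nx2 vertex array.
#
#     import numpy as np
#     from matplotlib.patches import Polygon
#     pts = np.array([[0.1, 0.1], [0.9, 0.2], [0.5, 0.8]])
#     tri = Polygon(pts, closed=True, facecolor='0.8', edgecolor='k')
#     ax.add_patch(tri)          # assumes an existing Axes `ax`
#     tri.set_xy(pts * 0.5)      # vertices can be replaced later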
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
def set_radius(self, radius):
self._path = None
self.r = radius
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
def set_width(self, width):
self._path = None
self.width = width
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
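# Usage sketch (illustrative): a partial annulus (donut slice) from 0 to 120
# degrees; passing *width* draws the ring from r - width to r.
#
#     from matplotlib.patches import Wedge
#     w = Wedge(center=(0.5, 0.5), r=0.4, theta1=0, theta2=120, width=0.1,
#               facecolor='seagreen')
#     ax.add_patch(w)   # assumes an existing Axes `ax`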
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
Draws an arrow, starting at (*x*, *y*), direction and length
        given by (*dx*, *dy*). The width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.sqrt(dx ** 2 + dy ** 2) or 1 # account for div by zero
cx = float(dx) / L
sx = float(dy) / L
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
*head_width*: float or None (default: 3*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
"""
if head_width is None:
head_width = 20 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.sqrt(dx ** 2 + dy ** 2)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
#if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
#if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
#figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
cx = float(dx) / distance
sx = float(dy) / distance
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
        Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self, ev):
if ev.x is None or ev.y is None:
return False, {}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x * x + y * y) <= 1.0, {}
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
        Create a true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
self.radius = radius
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
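# Usage sketch (illustrative): a spline-based Circle versus the polygonal
# CirclePolygon approximation.
#
#     from matplotlib.patches import Circle, CirclePolygon
#     smooth = Circle((0.3, 0.5), radius=0.2, fill=False, edgecolor='blue')
#     coarse = CirclePolygon((0.7, 0.5), radius=0.2, resolution=8,
#                            fill=False, edgecolor='red')
#     ax.add_patch(smooth)   # assumes an existing Axes `ax`
#     ax.add_patch(coarse)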
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
#self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
                self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
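# Usage sketch (illustrative): an unfilled elliptical arc; Arc must be added
# to an Axes and cannot be filled.
#
#     from matplotlib.patches import Arc
#     arc = Arc((0.5, 0.5), width=0.8, height=0.4, angle=30.0,
#               theta1=0.0, theta2=270.0, edgecolor='purple')
#     ax.add_patch(arc)   # assumes an existing Axes `ax`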
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
    Given a list of lists of strings, return a string in reST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
#adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclasses of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
        # a dictionary of style name, style class pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
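# Usage sketch (illustrative): the _Style.__new__ machinery lets a style be
# requested either by keyword arguments or by a single comma-separated string;
# both of the following are expected to produce equivalent BoxStyle.Round
# instances (BoxStyle is defined just below).
#
#     style_a = BoxStyle("round", pad=0.2, rounding_size=0.1)
#     style_b = BoxStyle("round, pad=0.2, rounding_size=0.1")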
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
    boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
    The following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
drawn. *mutation_scale* determines the overall size of the
mutation (by which I mean the transformation of the rectangle to
the fancy box). *mutation_aspect* determines the aspect-ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
            initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the very core of the
            :class:`BboxTransmuter` class and must be overridden in the
            subclasses. It receives the location and size of the
            rectangle, and the mutation_size, with which the amount of
            padding etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect-ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, \
height + 2. * pad,
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
#self.pad = pad
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, \
height + 2. * pad,
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # rounding size. Use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = width + 2. * pad - 2 * dr, \
height + 2. * pad - 2 * dr,
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth. *pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = width + 2. * pad - tooth_size, \
height + 2. * pad - tooth_size,
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = list(zip(bottom_saw_x, bottom_saw_y)) + \
list(zip(right_saw_x, right_saw_y)) + \
list(zip(top_saw_x, top_saw_y)) + \
list(zip(left_saw_x, left_saw_y)) + \
[(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth. *pad* if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1) // 2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list))
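# Usage sketch (illustrative): a boxstyle instance is a callable returning a
# Path for a given rectangle and mutation size.
#
#     from matplotlib.path import Path
#     round_style = BoxStyle.Round(pad=0.3, rounding_size=0.2)
#     p = round_style(0.0, 0.0, 1.0, 0.5, mutation_size=1.0)
#     assert isinstance(p, Path)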
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with comma-separated
        attributes, or an instance of :class:`BoxStyle`. The following box
        styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
        Old attributes are simply forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
ACCEPTS: %(AvailableBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:  # a single (l, b, w, h) sequence was passed
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
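# Usage sketch (illustrative): a rounded fancy box around a rectangle; the
# boxstyle can also be changed after construction.
#
#     from matplotlib.patches import FancyBboxPatch
#     fb = FancyBboxPatch((0.2, 0.2), width=0.6, height=0.3,
#                         boxstyle="round,pad=0.05", facecolor='wheat')
#     fb.set_boxstyle("sawtooth", pad=0.05)
#     ax.add_patch(fb)   # assumes an existing Axes `ax`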
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
A connectionstyle object can be either created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
    An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
class _Base(object):
"""
        A base class for connectionstyle classes. The derived class needs
        to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
        connected. The method needs to return a path connecting two
        points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
            Clip the path to the boundaries of patchA and patchB.
            The starting point of the path needs to be inside
            patchA and the end point inside patchB. The *contains*
            method of each patch object is used to test whether the point
            is inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
            Shrink the path by a fixed size (in points) given by shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
            and *posB*. The path is then clipped and shrunk.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrinked_path
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance of C1 to the line
        connecting C0-C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The middle control point is placed at the
        intersecting point of two lines which cross the start (or
        end) point at an angle of angleA (or angleB).
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
math.sin(self.angleA / 180. * math.pi),
cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
math.sin(self.angleB / 180. * math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
        two points. The path has one passing-through point placed at
        the intersecting point of two lines which cross the start
        (or end) point at an angle of angleA (or angleB). The
        connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
math.sin(self.angleA / 180. * math.pi),
cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
math.sin(self.angleB / 180. * math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
two points. The path can have two passing-through points, a
point placed at the distance of armA and angle of angleA from
point A, another point with respect to point B. The edges are
rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
#x_armA, y_armB
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
A line with *angle* between A and B with *armA* and
        *armB*. One of the arms is extended so that they are connected at
        a right angle. The length of armA is determined by (*armA*
+ *fraction* x AB distance). Same for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
*armA* : minimum length of armA
*armB* : minimum length of armB
*fraction* : a fraction of the distance between two points that
will be added to armA and armB.
*angle* : angle of the connecting line (if None, parallel to A
and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
    arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
    An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
    An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
    value. *path* is a :class:`Path` instance along which the arrow
    will be drawn. *mutation_size* and *aspect_ratio* have the same
    meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all their arguments (except self) must have
# default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head and so on
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
boolean. The boolean value indicates whether the path can
be filled or not. The return value can also be a list of paths
and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and take care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, fillable = self.transmute(path_shrinked,
mutation_size,
linewidth)
if cbook.iterable(fillable):
path_list = []
for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is simply the concatenation of the original path plus at
most two paths representing the arrow head at the begin point and
at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determines the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = \
head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projecting, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunk so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx ** 2 + dy ** 2)
# pad_projected : amount of pad to account for the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
if self.beginarrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
if self.endarrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
# this simple code will not work if ddx, ddy is greater than
# separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if self.beginarrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if self.endarrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None
):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length,
):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
An arrow with a bracket(]) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
An arrow with a bracket(]) at its end.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA, lengthA=lengthA, angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = \
make_wedged_bezier2(arrow_in, head_width / 2.,
wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
#head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
# path for the tail
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
# locate the tail start point near the begin point
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
If *posA* and *posB* are given, a path connecting the two points is
created according to the connectionstyle. The path will be
clipped with *patchA* and *patchB* and further shrunk by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter. If *path* is
provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
(matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be a string of the available arrowstyle names,
with optional comma-separated attributes, or an
ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connetors = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
Patch.__init__(self, **kwargs)
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
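# A minimal usage sketch of the constructor documented above (commented out;
# `ax` and the values are illustrative only, not part of this module):
#
#     arrow = FancyArrowPatch(posA=(0.2, 0.2), posB=(0.8, 0.8),
#                             arrowstyle="-|>", connectionstyle="arc3,rad=0.2",
#                             mutation_scale=20.)
#     ax.add_patch(arrow)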
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is not affected by this.
"""
self._dpi_cor = dpi_cor
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is not affected by this.
"""
return self._dpi_cor
def set_positions(self, posA, posB):
""" set the begin end end positions of the connecting
path. Use current vlaue if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
return the path of the arrow in the data coordinate. Use
get_path_in_displaycoord() method to retrieve the arrow path
in the display coord.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
# FIXME : dpi_cor is for the dpi-dependency of the
# linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
class ConnectionPatch(FancyArrowPatch):
"""
A :class:`~matplotlib.patches.ConnectionPatch` class is used to make
connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
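# A minimal usage sketch of the constructor documented above (commented out;
# `ax1`, `ax2` and the values are illustrative only, not part of this module):
#
#     con = ConnectionPatch(xyA=(0.5, 0.5), xyB=(0.2, 0.8),
#                           coordsA="data", coordsB="data",
#                           axesA=ax1, axesB=ax2, arrowstyle="-")
#     ax1.add_artist(con)   # added to the axes that should draw the line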
def _get_xy(self, x, y, s, axes=None):
"""
calculate the pixel position of the given point
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
set *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside the
axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
check whether the annotation needs to
be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
| gpl-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_5/Optical2.py | 33 | 7437 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Optical_lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
CalSol/Impulse | Telemetry/viewer/backend_pysideagg.py | 1 | 4858 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from backend_pyside import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure, parent=None):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__(self, figure, parent)
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(buffer(stringBuffer), int(self.renderer.width),
int(self.renderer.height),
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
| apache-2.0 |
jwdebelius/Machiavellian | setup.py | 1 | 1305 | #/urs/bin/env python
__version__ = "0.0.1-dev"
import os
from distutils.core import setup
classes = """
Development Status :: 1 - Planning
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Statistics
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
setup(name='machivellian',
version="0.0.1-dev",
license='BSD2',
description="Library for testing monte carlo effect sizes",
long_description=("Library for testing monte carlo effect sizes"),
author="J W Debelius",
author_email="[email protected]",
maintainer="J W Debelius",
maintainer_email="[email protected]",
packages=['machivellian', 'machivellian.tests'],
install_requires=['IPython >= 4.2.0',
'matplotlib >= 1.5.1',
'numpy >= 1.10.0',
'pandas >= 0.18.0',
'scipy >= 0.15.1',
'scikit-bio >= 0.4.2',
'nose >= 1.3.7',
],
)
| bsd-3-clause |
nikv96/Feature-Detection | HOG/Tester.py | 1 | 2867 | from skimage.transform import pyramid_gaussian
from skimage.io import imread
from skimage.feature import hog
from sklearn.externals import joblib
import cv2
import warnings
import sys
def overlapping_area(detection_1, detection_2):
x1_tl = detection_1[0]
x2_tl = detection_2[0]
x1_br = detection_1[0] + detection_1[3]
x2_br = detection_2[0] + detection_2[3]
y1_tl = detection_1[1]
y2_tl = detection_2[1]
y1_br = detection_1[1] + detection_1[4]
y2_br = detection_2[1] + detection_2[4]
x_overlap = max(0, min(x1_br, x2_br)-max(x1_tl, x2_tl))
y_overlap = max(0, min(y1_br, y2_br)-max(y1_tl, y2_tl))
overlap_area = x_overlap * y_overlap
area_1 = detection_1[3] * detection_1[4]
area_2 = detection_2[3] * detection_2[4]
total_area = area_1 + area_2 - overlap_area
return overlap_area / float(total_area)
def nms(detections, threshold=.5):
if len(detections) == 0:
return []
detections = sorted(detections, key=lambda detections: detections[2],reverse=True)
new_detections=[]
new_detections.append(detections[0])
del detections[0]
for index, detection in enumerate(detections):
for new_detection in new_detections:
if overlapping_area(detection, new_detection) > threshold:
del detections[index]
break
else:
new_detections.append(detection)
del detections[index]
return new_detections
def warn(*args, **kwargs):
pass
warnings.warn = warn
def sliding_window(image, window_size, step_size):
for a in xrange(0, image.shape[0], step_size[1]):
for b in xrange(0, image.shape[1], step_size[0]):
yield (b, a, image[a:a + window_size[1], b:b + window_size[0]])
def test():
im = imread("4.jpg", as_grey=True)
min_wdw_sz = [40, 40]
step_size = [10, 10]
orientations = 9
pixels_per_cell = [8,8]
cells_per_block = [4,4]
visualize = False
normalize = True
model_path = "models/svm.model"
threshold = 0
downscale = 1.25
clf = joblib.load(model_path)
detections = []
scale = 0
for im_scaled in pyramid_gaussian(im, downscale=downscale):
cd = []
if im_scaled.shape[0] < min_wdw_sz[1] or im_scaled.shape[1] < min_wdw_sz[0]:
break
for (x, y, im_window) in sliding_window(im_scaled, min_wdw_sz, step_size):
if im_window.shape[0] != min_wdw_sz[1] or im_window.shape[1] != min_wdw_sz[0]:
continue
fd = hog(im_window, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
pred = clf.predict(fd)
if pred == 1:
detections.append((x, y, clf.decision_function(fd),
int(min_wdw_sz[0]*(downscale**scale)),
int(min_wdw_sz[1]*(downscale**scale))))
cd.append(detections[-1])
scale+=1
clone = im.copy()
detections = nms(detections, threshold)
for (x_tl, y_tl, _, w, h) in detections:
cv2.rectangle(clone, (x_tl, y_tl), (x_tl+w,y_tl+h), (0, 0, 0), thickness=2)
cv2.imshow("RESULT", clone)
cv2.waitKey()
if __name__=="__main__":
test()
| mit |
chrish42/pylearn | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
`figure` keyword argument is supplied.
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
on to `imshow`.`
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
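def _imview_example():
    """
    A minimal sketch of the viewer above (the function name and values are
    illustrative only): show a random greyscale image in a fresh full-frame
    figure with nearest-neighbour interpolation.
    """
    image = np.random.uniform(size=(32, 32))
    imview(image, cmap='gray')
    plt.show()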
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
    image : ndarray
        Image to pad, as a (rows, cols, channels) array no larger than `shape`.
    shape : tuple of int
        (rows, cols) of the padded output.
Returns
-------
    ndarray
        The image centered on a black canvas of size `shape`.
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
    # Integer division keeps the offsets usable as slice indices (under
    # Python 3, "/" would produce floats here).
    rstart = (shape[0] - image.shape[0]) // 2
    cstart = (shape[1] - image.shape[1]) // 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
    image : ndarray
        Image to thumbnail, as a (rows, cols, channels) array.
    shape : tuple of int
        (rows, cols) of the resulting thumbnail.
Returns
-------
    ndarray
        The letterboxed thumbnail, of exactly size `shape`.
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
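# --- Illustrative sketch, not part of the original module ---
# make_letterboxed_thumbnail chains fit_inside and letterbox: the image is
# first shrunk to fit inside the target shape (preserving proportions) and
# then centered on a black canvas of exactly that shape. A hypothetical
# usage, relying on the module-level numpy import:
def _example_letterboxed_thumbnail():
    wide = np.ones((20, 40, 1), dtype='float32')  # a wide 20x40 image
    thumb = make_letterboxed_thumbnail(wide, (10, 10))
    # The result is exactly 10x10; the unused rows are black letterbox bars.
    assert thumb.shape[:2] == (10, 10)
    return thumb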
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
    ndar : ndarray
        Array to rescale.
    eps : float
        Small constant that keeps the division stable when the array is flat.
Returns
-------
    ndarray
        A copy of `ndar` rescaled to the [0, 1] interval.
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4-tuple of 2-d ndarrays (some of which may be None,
        for missing channels), in which every row is a flattened image.
    img_shape : 2-tuple of ints
        The first component is the height of each image,
        the second component is the width.
    tile_shape : 2-tuple of ints
        The number of images to tile in (rows, columns) form.
    tile_spacing : 2-tuple of ints
        The number of blank pixels between tiles, in (rows, columns) form.
    scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to [0, 1] before being
        plotted.
    output_pixel_vals : bool
        Whether the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
        The return value has dtype 'uint8' when `output_pixel_vals` is True
        (otherwise the same dtype as `X`), and is suitable for viewing as an
        image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
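# --- Illustrative sketch, not part of the original module ---
# tile_raster_images is typically used to inspect datasets or weight matrices
# whose rows are flattened images. A hypothetical example tiling 6 random
# 8x8 "images" into a 2x3 mosaic with 1-pixel spacing, relying on the
# module-level numpy import:
def _example_tile_raster_images():
    X = np.random.rand(6, 64).astype('float32')  # 6 flattened 8x8 images
    mosaic = tile_raster_images(X, img_shape=(8, 8), tile_shape=(2, 3),
                                tile_spacing=(1, 1))
    # Height: 2 * (8 + 1) - 1 = 17 rows; width: 3 * (8 + 1) - 1 = 26 columns.
    assert mosaic.shape == (17, 26)
    return mosaic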
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
consulo/consulo-python | plugin/src/main/dist/helpers/pydev/pydevconsole.py | 1 | 18230 | '''
Entry point module to start the interactive console.
'''
from _pydev_imps._pydev_saved_modules import thread
start_new_thread = thread.start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
from code import compile_command
from code import InteractiveInterpreter
import os
import sys
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import dict_iter_items
import traceback
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydevd_bundle import pydevd_vars, pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, _queue
try:
import __builtin__
except:
import builtins as __builtin__ # @UnresolvedImport
try:
False
True
except NameError: # version < 2.3 -- didn't have the True/False builtins
import __builtin__
setattr(__builtin__, 'True', 1) #Python 3.0 does not accept __builtin__.True = 1 in its syntax
setattr(__builtin__, 'False', 0)
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from _pydev_bundle.pydev_console_utils import CodeFragment
IS_PYTHON_3K = False
IS_PY24 = False
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = True
elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
IS_PY24 = True
except:
#That's OK, not all versions of python have sys.version_info
pass
class Command:
def __init__(self, interpreter, code_fragment):
"""
:type code_fragment: CodeFragment
:type interpreter: InteractiveConsole
"""
self.interpreter = interpreter
self.code_fragment = code_fragment
self.more = None
def symbol_for_fragment(code_fragment):
if code_fragment.is_single_line:
symbol = 'single'
else:
symbol = 'exec' # Jython doesn't support this
return symbol
symbol_for_fragment = staticmethod(symbol_for_fragment)
def run(self):
text = self.code_fragment.text
symbol = self.symbol_for_fragment(self.code_fragment)
self.more = self.interpreter.runsource(text, '<input>', symbol)
try:
try:
execfile #Not in Py3k
except NameError:
from _pydev_bundle.pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function
try:
import builtins # @UnresolvedImport
builtins.runfile = runfile
except:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, mainThread, show_banner=True):
BaseInterpreterInterface.__init__(self, mainThread)
self.client_port = client_port
self.host = host
self.namespace = {}
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def do_add_exec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def get_namespace(self):
return self.namespace
def getCompletions(self, text, act_tok):
try:
from _pydev_bundle._pydev_completer import Completer
completer = Completer(self.namespace, None)
return completer.complete(act_tok)
except:
import traceback
traceback.print_exc()
return []
def close(self):
sys.exit(0)
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def process_exec_queue(interpreter):
from pydev_ipython.inputhook import get_inputhook, set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
from _pydev_bundle.pydev_import_hook import import_hook_manager
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui))
# enable_gui_function in activate_matplotlib should be called in main thread. That's why we call
# interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread.
import_hook_manager.add_module_name("pylab", activate_pylab)
import_hook_manager.add_module_name("pyplot", activate_pyplot)
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if hasattr(code_fragment, '__call__'):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.add_exec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
type, value, tb = sys.exc_info()
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from _pydev_bundle.pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
'''
    We have to override exit because calling sys.exit will only actually exit
    the main thread; as we're in an XML-RPC server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def handshake():
return "PyCharm"
#=======================================================================================================================
# start_console_server
#=======================================================================================================================
def start_console_server(host, port, interpreter):
if port == 0:
host = ''
#I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport
try:
if IS_PY24:
server = XMLRPCServer((host, port), logRequests=False)
else:
server = XMLRPCServer((host, port), logRequests=False, allow_none=True)
except:
sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port))
sys.stderr.flush()
raise
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server.register_function(interpreter.execLine)
server.register_function(interpreter.execMultipleLines)
server.register_function(interpreter.getCompletions)
server.register_function(interpreter.getFrame)
server.register_function(interpreter.getVariable)
server.register_function(interpreter.changeVariable)
server.register_function(interpreter.getDescription)
server.register_function(interpreter.close)
server.register_function(interpreter.interrupt)
server.register_function(handshake)
server.register_function(interpreter.connectToDebugger)
server.register_function(interpreter.hello)
server.register_function(interpreter.getArray)
server.register_function(interpreter.evaluate)
# Functions for GUI main loop integration
server.register_function(interpreter.enableGui)
if port == 0:
(h, port) = server.socket.getsockname()
print(port)
print(interpreter.client_port)
sys.stderr.write(interpreter.get_greeting_msg())
sys.stderr.flush()
while True:
try:
server.serve_forever()
except:
# Ugly code to be py2/3 compatible
# https://sw-brainwy.rhcloud.com/tracker/PyDev/534:
# Unhandled "interrupted system call" error in the pydevconsol.py
e = sys.exc_info()[1]
retry = False
try:
retry = e.args[0] == 4 #errno.EINTR
except:
pass
if not retry:
raise
# Otherwise, keep on going
return server
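# --- Illustrative sketch, not part of the original module ---
# start_console_server exposes the interpreter over XML-RPC, so any front end
# can drive it with plain XML-RPC calls. A minimal, hypothetical client for a
# server already listening on localhost at `port`:
def _example_console_client(port):
    try:
        from xmlrpclib import ServerProxy  # Python 2
    except ImportError:
        from xmlrpc.client import ServerProxy  # Python 3
    proxy = ServerProxy('http://127.0.0.1:%s/' % port)
    assert proxy.handshake() == 'PyCharm'
    # execLine reports whether the console still expects more input
    # before the statement can be executed.
    return proxy.execLine('x = 21 * 2')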
def start_server(host, port, client_port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
interpreter = InterpreterInterface(host, client_port, threading.currentThread())
start_new_thread(start_console_server,(host, port, interpreter))
process_exec_queue(interpreter)
def get_ipython_hidden_vars_dict():
useful_ipython_vars = ['_', '__']
try:
if IPYTHON and hasattr(__builtin__, 'interpreter'):
pydev_interpreter = get_interpreter().interpreter
if hasattr(pydev_interpreter, 'ipython') and hasattr(pydev_interpreter.ipython, 'user_ns_hidden'):
user_ns_hidden = pydev_interpreter.ipython.user_ns_hidden
if isinstance(user_ns_hidden, dict):
# Since IPython 2 dict `user_ns_hidden` contains hidden variables and values
user_hidden_dict = user_ns_hidden
else:
# In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables
user_hidden_dict = dict([(key, val) for key, val in dict_iter_items(pydev_interpreter.ipython.user_ns)
if key in user_ns_hidden])
return dict([(key, val) for key, val in dict_iter_items(user_hidden_dict) if key not in useful_ipython_vars])
return None
except Exception:
traceback.print_exc()
return None
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
setattr(__builtin__, 'interpreter', interpreterInterface)
sys.stderr.write(interpreterInterface.get_greeting_msg())
sys.stderr.flush()
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#===============================================================================
# Debugger integration
#===============================================================================
def exec_code(code, globals, locals, debugger):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
res = interpreterInterface.need_more(code)
if res:
return True
interpreterInterface.add_exec(code, debugger)
return False
class ConsoleWriter(InteractiveInterpreter):
skip = 0
def __init__(self, locals=None):
InteractiveInterpreter.__init__(self, locals)
def write(self, data):
#if (data.find("global_vars") == -1 and data.find("pydevd") == -1):
if self.skip > 0:
self.skip -= 1
else:
if data == "Traceback (most recent call last):\n":
self.skip = 1
sys.stderr.write(data)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred."""
#Override for avoid using sys.excepthook PY-12600
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
sys.stderr.write(''.join(list))
def showtraceback(self):
"""Display the exception that just occurred."""
#Override for avoid using sys.excepthook PY-12600
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
lines = traceback.format_list(tblist)
if lines:
lines.insert(0, "Traceback (most recent call last):\n")
lines.extend(traceback.format_exception_only(type, value))
finally:
tblist = tb = None
sys.stderr.write(''.join(lines))
def console_exec(thread_id, frame_id, expression, dbg):
"""returns 'False' in case expression is partially correct
"""
frame = pydevd_vars.find_frame(thread_id, frame_id)
expression = str(expression.replace('@LINE@', '\n'))
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
if IPYTHON:
need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg)
if not need_more:
pydevd_save_locals.save_locals(frame)
return need_more
interpreter = ConsoleWriter()
try:
code = compile_command(expression)
except (OverflowError, SyntaxError, ValueError):
# Case 1
interpreter.showsyntaxerror()
return False
if code is None:
# Case 2
return True
#Case 3
try:
Exec(code, updated_globals, frame.f_locals)
except SystemExit:
raise
except:
interpreter.showtraceback()
else:
pydevd_save_locals.save_locals(frame)
return False
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn(sys.stdin)
port, client_port = sys.argv[1:3]
from _pydev_bundle import pydev_localhost
if int(port) == 0 and int(client_port) == 0:
(h, p) = pydev_localhost.get_socket_name()
client_port = p
pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port))
| apache-2.0 |
guschmue/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 3 | 31357 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
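# --- Illustrative sketch, not part of the original module ---
# _get_in_out_shape turns raw x/y shapes into per-batch placeholder shapes,
# expanding the output to one-hot width when n_classes > 1. A hypothetical
# check for 1000 samples of 10 features, 3 classes and batches of 32:
def _example_get_in_out_shape():
  input_shape, output_shape, batch_size = _get_in_out_shape(
      (1000, 10), (1000,), n_classes=3, batch_size=32)
  assert input_shape == [32, 10]
  assert output_shape == [32, 3]
  assert batch_size == 32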
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or an iterable which returns dicts), `n_classes[key]` gives
      the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
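# --- Illustrative sketch, not part of the original module ---
# _batch_data accumulates an iterator's elements into np.matrix batches. A
# hypothetical run over seven one-feature rows with batch_size=3 yields two
# full 3-row batches followed by a final 1-row batch:
def _example_batch_data():
  rows = (np.array([float(i)]) for i in range(7))
  batches = list(_batch_data(rows, batch_size=3))
  assert [b.shape[0] for b in batches] == [3, 3, 1]
  return batches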
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
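# --- Illustrative sketch, not part of the original module ---
# For in-memory arrays, setup_predict_data_feeder just reshapes 1-D input to
# (n, 1) and slices it into batch_size chunks. A hypothetical example with
# 10 samples and batches of 4:
def _example_setup_predict_data_feeder():
  x = np.arange(10, dtype=np.float32)
  parts = setup_predict_data_feeder(x, batch_size=4)
  # ceil(10 / 4) = 3 parts of shapes (4, 1), (4, 1) and (2, 1).
  assert [p.shape for p in parts] == [(4, 1), (4, 1), (2, 1)]
  return parts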
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
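# --- Illustrative sketch, not part of the original module ---
# _access hides the difference between positional indexing on numpy arrays
# and .iloc indexing on pandas objects, so rows can be sampled the same way
# from either. A hypothetical comparison (the pandas branch only runs when
# pandas is installed):
def _example_access():
  data = np.array([10.0, 20.0, 30.0])
  assert _access(data, 1) == 20.0
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    series = pd.Series([10.0, 20.0, 30.0], index=['a', 'b', 'c'])
    # .iloc is positional, so the string index does not get in the way.
    assert _access(series, 1) == 20.0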
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: Features, given either as an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or as a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (
_check_dtype(self._y.dtype) if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
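# --- Illustrative sketch, not part of the original module ---
# Typical DataFeeder usage: build placeholders with input_builder() and then
# repeatedly call the function returned by get_feed_dict_fn() to obtain feed
# dicts for session.run(). A hypothetical end-to-end use, assuming a graph
# (TF 1.x style) is available for placeholder creation:
def _example_data_feeder_usage():
  x = np.random.rand(100, 5).astype(np.float32)
  y = np.random.randint(0, 3, size=100)
  feeder = DataFeeder(x, y, n_classes=3, batch_size=10)
  inp, out = feeder.input_builder()
  feed_fn = feeder.get_feed_dict_fn()
  feed_dict = feed_fn()
  # One mini-batch: 10 input rows and 10 one-hot label rows of width 3.
  assert feed_dict[inp.name].shape == (10, 5)
  assert feed_dict[out.name].shape == (10, 3)
  return feed_dict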
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is customary to have these iterators rotate infinitely
  over the dataset, so the trainer side can control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/doc/pyplots/plotmap.py | 7 | 2211 | import matplotlib.pyplot as plt
import numpy as np
try:
from mpl_toolkits.basemap import Basemap
have_basemap = True
except ImportError:
have_basemap = False
def plotmap():
# create figure
fig = plt.figure(figsize=(8,8))
# set up orthographic map projection with
# perspective of satellite looking down at 50N, 100W.
# use low resolution coastlines.
map = Basemap(projection='ortho',lat_0=50,lon_0=-100,resolution='l')
# lat/lon coordinates of five cities.
lats=[40.02,32.73,38.55,48.25,17.29]
lons=[-105.16,-117.16,-77.00,-114.21,-88.10]
cities=['Boulder, CO','San Diego, CA',
'Washington, DC','Whitefish, MT','Belize City, Belize']
# compute the native map projection coordinates for cities.
xc,yc = map(lons,lats)
# make up some data on a regular lat/lon grid.
nlats = 73; nlons = 145; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:])
wave = 0.75*(np.sin(2.*lats)**8*np.cos(4.*lons))
mean = 0.5*np.cos(2.*lats)*((np.sin(2.*lats))**2 + 2.)
# compute native map projection coordinates of lat/lon grid.
# (convert lons and lats to degrees first)
x, y = map(lons*180./np.pi, lats*180./np.pi)
# draw map boundary
map.drawmapboundary(color="0.9")
# draw graticule (latitude and longitude grid lines)
map.drawmeridians(np.arange(0,360,30),color="0.9")
map.drawparallels(np.arange(-90,90,30),color="0.9")
# plot filled circles at the locations of the cities.
map.plot(xc,yc,'wo')
# plot the names of five cities.
for name,xpt,ypt in zip(cities,xc,yc):
plt.text(xpt+100000,ypt+100000,name,fontsize=9,color='w')
# contour data over the map.
cs = map.contour(x,y,wave+mean,15,linewidths=1.5)
# draw blue marble image in background.
# (downsample the image by 50% for speed)
map.bluemarble(scale=0.5)
def plotempty():
# create figure
fig = plt.figure(figsize=(8,8))
fig.text(0.5, 0.5, "Sorry, could not import Basemap",
horizontalalignment='center')
if have_basemap:
plotmap()
else:
plotempty()
plt.show()
| mit |
alekz112/statsmodels | statsmodels/examples/ex_lowess.py | 34 | 2827 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:26:06 2011
Author: Chris Jordan Squire
extracted from test suite by josef-pktd
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# this is just to check direct import
import statsmodels.nonparametric.smoothers_lowess
statsmodels.nonparametric.smoothers_lowess.lowess
x = np.arange(20.)
#standard normal noise
noise = np.array([-0.76741118, -0.30754369,
0.39950921, -0.46352422, -1.67081778,
0.6595567 , 0.66367639, -2.04388585,
0.8123281 , 1.45977518,
1.21428038, 1.29296866, 0.78028477,
-0.2402853 , -0.21721302,
0.24549405, 0.25987014, -0.90709034,
-1.45688216, -0.31780505])
y = x + noise
expected_lowess = np.array([[ 0. , -0.58337912],
[ 1. , 0.61951246],
[ 2. , 1.82221628],
[ 3. , 3.02536876],
[ 4. , 4.22667951],
[ 5. , 5.42387723],
[ 6. , 6.60834945],
[ 7. , 7.7797691 ],
[ 8. , 8.91824348],
[ 9. , 9.94997506],
[ 10. , 10.89697569],
[ 11. , 11.78746276],
[ 12. , 12.62356492],
[ 13. , 13.41538492],
[ 14. , 14.15745254],
[ 15. , 14.92343948],
[ 16. , 15.70019862],
[ 17. , 16.48167846],
[ 18. , 17.26380699],
[ 19. , 18.0466769 ]])
actual_lowess = lowess(y, x)
print(actual_lowess)
print(np.max(np.abs(actual_lowess-expected_lowess)))
plt.plot(y, 'o')
plt.plot(actual_lowess[:,1])
plt.plot(expected_lowess[:,1])
import os.path
import statsmodels.nonparametric.tests.results
rpath = os.path.split(statsmodels.nonparametric.tests.results.__file__)[0]
rfile = os.path.join(rpath, 'test_lowess_frac.csv')
test_data = np.genfromtxt(open(rfile, 'rb'),
delimiter = ',', names = True)
expected_lowess_23 = np.array([test_data['x'], test_data['out_2_3']]).T
expected_lowess_15 = np.array([test_data['x'], test_data['out_1_5']]).T
actual_lowess_23 = lowess(test_data['y'], test_data['x'] ,frac = 2./3)
actual_lowess_15 = lowess(test_data['y'], test_data['x'] ,frac = 1./5)
#plt.show()
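# --- Illustrative note, not part of the original example ---
# ``frac`` is the fraction of the data used for each local regression, so
# smaller values track the noise more closely. On the toy series above one
# could compare, for instance:
# lowess(y, x, frac=0.2) # wiggly fit, each point uses ~4 of the 20 samples
# lowess(y, x, frac=2./3) # smoother fit (the default neighbourhood)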
| bsd-3-clause |
toohsk/TensorFlow_cookbook | stanford/linear_regression/linear_regression.py | 1 | 1531 | # linear regression in stanford university channel
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
'''
Good ole linear regression: find the best linear fit to our data
'''
def generate_dataset():
# data is generated by y = 2x + e
# where 'e' is sampled from a normal distribution
x_batch = np.linspace(-1, 1, 101)
y_batch = 2 * x_batch + np.random.randn(*x_batch.shape) * 0.3
return x_batch, y_batch
def linear_regression():
x = tf.placeholder(tf.float32, shape=(None,), name='x')
y = tf.placeholder(tf.float32, shape=(None,), name='y')
with tf.variable_scope('lreg') as scope:
w = tf.Variable(np.random.normal(), name='W')
        y_pred = tf.multiply(w, x)  # tf.mul was renamed to tf.multiply in TF 1.0
loss = tf.reduce_mean(tf.square(y_pred - y))
return x, y, y_pred, loss
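# --- Illustrative sketch, not part of the original script ---
# For this single-weight model (no bias term), gradient descent should
# approach the ordinary-least-squares solution w* = sum(x*y) / sum(x*x).
# A hypothetical closed-form check to compare against the trained weight:
def closed_form_weight(x_batch, y_batch):
    # Normal-equation solution for y ~ w * x; with data generated as
    # y = 2x + noise, this lands close to 2.
    return np.sum(x_batch * y_batch) / np.sum(x_batch * x_batch)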
def run():
x_batch, y_batch = generate_dataset()
x, y, y_pred, loss = linear_regression()
optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
feed_dict = {x: x_batch, y: y_batch}
for _ in range(30):
loss_val, _ = sess.run([loss, optimizer], feed_dict=feed_dict)
print('loss:', loss_val.mean())
y_pred_batch = sess.run(y_pred, {x: x_batch})
plt.figure(1)
plt.scatter(x_batch, y_batch)
plt.plot(x_batch, y_pred_batch)
plt.savefig('plot.png')
if __name__ == '__main__':
run()
| apache-2.0 |
ceholden/yatsm | yatsm/cli/train.py | 1 | 11912 | """ Command line interface for training classifiers on YATSM output """
from datetime import datetime as dt
import logging
import os
import click
import matplotlib.pyplot as plt
import numpy as np
from osgeo import gdal
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.externals import joblib
from . import options
from ..config_parser import parse_config_file
from ..classifiers import cfg_to_algorithm, diagnostics
from ..errors import TrainingDataException
from .. import io, plots, utils
logger = logging.getLogger('yatsm')
gdal.AllRegister()
gdal.UseExceptions()
if hasattr(plt, 'style') and 'ggplot' in plt.style.available:
plt.style.use('ggplot')
@click.command(short_help='Train classifier on YATSM output')
@options.arg_config_file
@click.argument('classifier_config', metavar='<classifier_config>', nargs=1,
type=click.Path(exists=True, readable=True,
dir_okay=False, resolve_path=True))
@click.argument('model', metavar='<model>', nargs=1,
type=click.Path(writable=True, dir_okay=False,
resolve_path=True))
@click.option('--kfold', 'n_fold', nargs=1, type=click.INT, default=3,
help='Number of folds in cross validation (default: 3)')
@click.option('--seed', nargs=1, type=click.INT,
help='Random number generator seed')
@click.option('--plot', is_flag=True, help='Show diagnostic plots')
@click.option('--diagnostics', is_flag=True, help='Run K-Fold diagnostics')
@click.option('--overwrite', is_flag=True, help='Overwrite output model file')
@click.pass_context
def train(ctx, config, classifier_config, model, n_fold, seed,
plot, diagnostics, overwrite):
"""
Train a classifier from ``scikit-learn`` on YATSM output and save result to
file <model>. Dataset configuration is specified by <yatsm_config> and
classifier and classifier parameters are specified by <classifier_config>.
"""
# Setup
if not model.endswith('.pkl'):
model += '.pkl'
if os.path.isfile(model) and not overwrite:
raise click.ClickException('<model> exists and --overwrite was not '
'specified')
if seed:
np.random.seed(seed)
# Parse config & algorithm config
cfg = parse_config_file(config)
algo, algo_cfg = cfg_to_algorithm(classifier_config)
training_image = cfg['classification']['training_image']
if not training_image or not os.path.isfile(training_image):
raise click.ClickException('Training data image {} does not exist'
.format(training_image))
# Find information from results -- e.g., design info
attrs = find_result_attributes(cfg)
cfg['YATSM'].update(attrs)
# Cache file for training data
has_cache = False
training_cache = cfg['classification']['cache_training']
if training_cache:
# If doesn't exist, retrieve it
if not os.path.isfile(training_cache):
logger.info('Could not retrieve cache file for Xy')
logger.info(' file: %s' % training_cache)
else:
logger.info('Restoring X/y from cache file')
has_cache = True
training_image = cfg['classification']['training_image']
# Check if we need to regenerate the cache file because training data is
# newer than the cache
regenerate_cache = is_cache_old(training_cache, training_image)
if regenerate_cache:
logger.warning('Existing cache file older than training data ROI')
logger.warning('Regenerating cache file')
if not has_cache or regenerate_cache:
logger.debug('Reading in X/y')
X, y, row, col, labels = get_training_inputs(cfg)
logger.debug('Done reading in X/y')
else:
logger.debug('Reading in X/y from cache file %s' % training_cache)
with np.load(training_cache) as f:
X = f['X']
y = f['y']
row = f['row']
col = f['col']
labels = f['labels']
logger.debug('Read in X/y from cache file %s' % training_cache)
# If cache didn't exist but is specified, create it for first time
if not has_cache and training_cache:
logger.info('Saving X/y to cache file %s' % training_cache)
try:
np.savez(training_cache,
X=X, y=y, row=row, col=col, labels=labels)
except Exception as e:
raise click.ClickException('Could not save X/y to cache file ({})'
.format(e))
# Do modeling
logger.info('Training classifier')
algo.fit(X, y, **algo_cfg.get('fit', {}))
# Serialize algorithm to file
logger.info('Pickling classifier with sklearn.externals.joblib')
joblib.dump(algo, model, compress=3)
# Diagnostics
if diagnostics:
algo_diagnostics(cfg, X, y, row, col, algo, n_fold, plot)
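# Illustrative invocation through the YATSM command line (argument values are
# hypothetical, not taken from this repository):
#     yatsm train <config> <classifier_config> <model> --kfold 5 --diagnostics --plot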
def is_cache_old(cache_file, training_file):
""" Indicates if cache file is older than training data file
Args:
cache_file (str): filename of the cache file
training_file (str): filename of the training data file
Returns:
bool: True if the cache file is older than the training data file
and needs to be updated; False otherwise
"""
if cache_file and os.path.isfile(cache_file):
return os.stat(cache_file).st_mtime < os.stat(training_file).st_mtime
else:
return False
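# Sketch of the expected behaviour, with hypothetical file names:
#     is_cache_old('training.npz', 'roi.gtif') -> True when 'roi.gtif' was modified
#     after the cache was written; False when the cache is current or missing.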
def find_result_attributes(cfg):
""" Return result attributes relevant for training a classifier
At this time, the only relevant information is the design information,
``design (OrderedDict)`` and ``design_matrix (str)``
Args:
cfg (dict): YATSM configuration dictionary
Returns:
dict: dictionary of result attributes
"""
attrs = {
'design': None,
'design_matrix': None
}
for result in utils.find_results(cfg['dataset']['output'],
cfg['dataset']['output_prefix'] + '*'):
try:
md = np.load(result)['metadata'].item()
attrs['design'] = md['YATSM']['design']
attrs['design_matrix'] = md['YATSM']['design_matrix']
except:
pass
else:
return attrs
raise AttributeError('Could not find following attributes in results: {}'
.format(attrs.keys()))
def get_training_inputs(cfg, exit_on_missing=False):
""" Returns X features and y labels specified in config file
Args:
cfg (dict): YATSM configuration dictionary
exit_on_missing (bool, optional): exit if input feature cannot be found
Returns:
X (np.ndarray): matrix of feature inputs for each training data sample
y (np.ndarray): array of labeled training data samples
row (np.ndarray): row pixel locations of `y`
col (np.ndarray): column pixel locations of `y`
        labels (np.ndarray): labels of `y` if found, else None
"""
# Find and parse training data
roi = io.read_image(cfg['classification']['training_image'])
logger.debug('Read in training data')
if len(roi) == 2:
logger.info('Found labels for ROIs -- including in output')
labels = roi[1]
else:
roi = roi[0]
labels = None
# Determine start and end dates of training sample relevance
try:
training_start = dt.strptime(
cfg['classification']['training_start'],
cfg['classification']['training_date_format']).toordinal()
training_end = dt.strptime(
cfg['classification']['training_end'],
cfg['classification']['training_date_format']).toordinal()
except:
logger.error('Failed to parse training data start or end dates')
raise
# Loop through samples in ROI extracting features
mask_values = cfg['classification']['roi_mask_values']
mask = ~np.in1d(roi, mask_values).reshape(roi.shape)
row, col = np.where(mask)
y = roi[row, col]
X = []
out_y = []
out_row = []
out_col = []
_row_previous = None
for _row, _col, _y in zip(row, col, y):
# Load result
if _row != _row_previous:
output_name = utils.get_output_name(cfg['dataset'], _row)
try:
rec = np.load(output_name)['record']
_row_previous = _row
except:
logger.error('Could not open saved result file %s' %
output_name)
if exit_on_missing:
raise
else:
continue
# Find intersecting time segment
i = np.where((rec['start'] < training_start) &
(rec['end'] > training_end) &
(rec['px'] == _col))[0]
if i.size == 0:
logger.debug('Could not find model for label %i at x/y %i/%i' %
(_y, _col, _row))
continue
elif i.size > 1:
raise TrainingDataException(
'Found more than one valid model for label %i at x/y %i/%i' %
(_y, _col, _row))
# Extract coefficients with intercept term rescaled
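            # Assumption: coef[0] is the intercept and coef[1] the slope against time,
            # so adding coef[1] * (start + end) / 2 re-centers the intercept at the
            # segment midpoint, making intercepts comparable across segments.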
coef = rec[i]['coef'][0, :]
coef[0, :] = (coef[0, :] +
coef[1, :] * (rec[i]['start'] + rec[i]['end']) / 2.0)
X.append(np.concatenate((coef.reshape(coef.size), rec[i]['rmse'][0])))
out_y.append(_y)
out_row.append(_row)
out_col.append(_col)
out_row = np.array(out_row)
out_col = np.array(out_col)
if labels is not None:
labels = labels[out_row, out_col]
return np.array(X), np.array(out_y), out_row, out_col, labels
def algo_diagnostics(cfg, X, y,
row, col, algo, n_fold, make_plots=True):
""" Display algorithm diagnostics for a given X and y
Args:
cfg (dict): YATSM configuration dictionary
X (np.ndarray): X feature input used in classification
y (np.ndarray): y labeled examples
row (np.ndarray): row pixel locations of `y`
col (np.ndarray): column pixel locations of `y`
algo (sklearn classifier): classifier used from scikit-learn
n_fold (int): number of folds for crossvalidation
make_plots (bool, optional): show diagnostic plots (default: True)
"""
# Print algorithm diagnostics without crossvalidation
logger.info('<----- DIAGNOSTICS ----->')
if hasattr(algo, 'oob_score_'):
logger.info('Out of Bag score: %f' % algo.oob_score_)
kfold_summary = np.zeros((0, 2))
test_names = ['KFold', 'Stratified KFold', 'Spatial KFold (shuffle)']
def report(kf):
logger.info('<----------------------->')
logger.info('%s crossvalidation scores:' % kf.__class__.__name__)
try:
scores = diagnostics.kfold_scores(X, y, algo, kf)
except Exception as e:
logger.warning('Could not perform %s cross-validation: %s' %
(kf.__class__.__name__, e))
return (np.nan, np.nan)
else:
return scores
kf = KFold(y.size, n_folds=n_fold)
kfold_summary = np.vstack((kfold_summary, report(kf)))
kf = StratifiedKFold(y, n_folds=n_fold)
kfold_summary = np.vstack((kfold_summary, report(kf)))
kf = diagnostics.SpatialKFold(y, row, col, n_folds=n_fold, shuffle=True)
kfold_summary = np.vstack((kfold_summary, report(kf)))
if make_plots:
plots.plot_crossvalidation_scores(kfold_summary, test_names)
logger.info('<----------------------->')
if hasattr(algo, 'feature_importances_'):
logger.info('Feature importance:')
logger.info(algo.feature_importances_)
if make_plots:
plots.plot_feature_importance(algo, cfg)
| mit |
Lawrence-Liu/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
nmabhi/Webface | demos/classifier.py | 1 | 10799 | #!/usr/bin/env python2
#
# Example to classify faces.
# Brandon Amos
# 2015/10/11
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import sys
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
#modelDir = os.path.join(fileDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(imgPath, multiple=False):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
if multiple:
bbs = align.getAllFaceBoundingBoxes(rgbImg)
else:
bb1 = align.getLargestFaceBoundingBox(rgbImg)
bbs = [bb1]
if len(bbs) == 0 or (not multiple and bb1 is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
reps = []
for bb in bbs:
start = time.time()
alignedFace = align.align(
args.imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
print("This bbox is centered at {}, {}".format(bb.center().x, bb.center().y))
start = time.time()
rep = net.forward(alignedFace)
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
reps.append((bb.center().x, rep))
sreps = sorted(reps, key=lambda x: x[0])
return sreps
def train(args):
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(args.workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
if args.classifier == 'LinearSvm':
clf = SVC(C=1, kernel='linear', probability=True)
elif args.classifier == 'GridSearchSvm':
print("""
Warning: In our experiences, using a grid search over SVM hyper-parameters only
gives marginally better performance than a linear SVM with C=1 and
is not worth the extra computations of performing a grid search.
""")
param_grid = [
{'C': [1, 10, 100, 1000],
'kernel': ['linear']},
{'C': [1, 10, 100, 1000],
'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
clf = GridSearchCV(SVC(C=1, probability=True), param_grid, cv=5)
    elif args.classifier == 'GMM':  # Generally not the best-performing choice
clf = GMM(n_components=nClasses)
# ref:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
elif args.classifier == 'RadialSvm': # Radial Basis Function kernel
# works better with C = 1 and gamma = 2
clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
    elif args.classifier == 'DecisionTree':  # Generally not the best-performing choice
clf = DecisionTreeClassifier(max_depth=20)
elif args.classifier == 'GaussianNB':
clf = GaussianNB()
# ref: https://jessesw.com/Deep-Learning/
elif args.classifier == 'DBN':
from nolearn.dbn import DBN
clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], # i/p nodes, hidden nodes, o/p nodes
learn_rates=0.3,
# Smaller steps mean a possibly more accurate result, but the
# training will take longer
learn_rate_decays=0.9,
# a factor the initial learning rate will be multiplied by
# after each iteration of the training
epochs=300, # no of iternation
# dropouts = 0.25, # Express the percentage of nodes that
# will be randomly dropped as a decimal.
verbose=1)
if args.ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
fName = "{}/classifier.pkl".format(args.workDir)
print("Saving classifier to '{}'".format(fName))
with open(fName, 'w') as f:
pickle.dump((le, clf), f)
def infer(args, multiple=False):
with open(args.classifierModel, 'rb') as f:
if sys.version_info[0] < 3:
(le, clf) = pickle.load(f)
else:
(le, clf) = pickle.load(f, encoding='latin1')
for img in args.imgs:
print("\n=== {} ===".format(img))
reps = getRep(img, multiple)
if len(reps) > 1:
print("List of faces in image from left to right")
for r in reps:
rep = r[1].reshape(1, -1)
bbx = r[0]
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
if multiple:
print("Predict {} @ x={} with {:.2f} confidence.".format(person.decode('utf-8'), bbx,
confidence))
else:
print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers(dest='mode', help="Mode")
trainParser = subparsers.add_parser('train',
help="Train a new classifier.")
trainParser.add_argument('--ldaDim', type=int, default=-1)
trainParser.add_argument(
'--classifier',
type=str,
choices=[
'LinearSvm',
'GridSearchSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN'],
help='The type of classifier to use.',
default='LinearSvm')
trainParser.add_argument(
'workDir',
type=str,
help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
inferParser = subparsers.add_parser(
'infer', help='Predict who an image contains from a trained classifier.')
inferParser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
inferParser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
inferParser.add_argument('--multi', help="Infer multiple faces in image",
action="store_true")
args = parser.parse_args()
if args.verbose:
print("Argument parsing and import libraries took {} seconds.".format(
time.time() - start))
if args.mode == 'infer' and args.classifierModel.endswith(".t7"):
raise Exception("""
Torch network model passed as the classification model,
which should be a Python pickle (.pkl)
See the documentation for the distinction between the Torch
network and classification models:
http://cmusatyalab.github.io/openface/demo-3-classifier/
http://cmusatyalab.github.io/openface/training-new-models/
Use `--networkModel` to set a non-standard Torch network model.""")
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
if args.verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args, args.multi)
| apache-2.0 |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/01 - speed test files/reuniao1.py | 1 | 3569 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 9 16:03:54 2016
@author: camacho
"""
import Kernel;reload(Kernel);kl = Kernel
#import Likelihood as lk
import numpy as np
import matplotlib.pyplot as pl
from time import time
import george as ge
##### LIKELIHOOD
def likelihood1(kernel, x, xcalc, y, yerr): #covariance matrix calculations
start = time() #Corrected and faster version
K = np.zeros((len(x),len(x))) #covariance matrix K
start = time() #Corrected and faster version
for i in range(len(x)):
x1 = x[i]
for j in range(len(xcalc)):
x2 = xcalc[j]
K[i,j] = kernel(x1, x2)
K=K+yerr**2*np.identity(len(x))
# start = time() #Corrected and faster version
# log_p_correct = lnlike(K, y)
# tempo= (time() - start)
# print 'likelihood ->', log_p_correct
# return K
r=y
#def lnlike(K, r): #log-likelihood calculations
# start = time() #Corrected and faster version
from scipy.linalg import cho_factor, cho_solve
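    # The lines below evaluate the standard GP log marginal likelihood,
    #     log p(y) = -1/2 * y^T K^{-1} y - 1/2 * log|K| - n/2 * log(2*pi),
    # using the Cholesky factor L of K, for which log|K| = 2 * sum(log(diag(L))).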
L1 = cho_factor(K) # tuple (L, lower)
sol = cho_solve(L1, r) # this is K^-1*(r)
n = r.size
logLike = -0.5*np.dot(r, sol) \
- np.sum(np.log(np.diag(L1[0]))) \
- n*0.5*np.log(2*np.pi)
tempo= (time() - start)
return tempo
    #return logLike
pontos=[]
temposES=[];temposESS=[];temposRQ=[]
georgeES=[];georgeESS=[];georgeRQ=[]
for i in np.arange(50,500,25):
pontos.append(i)
np.random.seed(100)
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
kernel1=kl.ExpSquared(19.0, 2.0)
tempo1=likelihood1(kernel1, x, x, y, yerr)
temposES.append(tempo1)
kernel2=kl.ExpSineSquared(15.0, 2.0, 10.0)
tempo2=likelihood1(kernel2, x, x, y, yerr)
temposESS.append(tempo2)
kernel3=kl.RatQuadratic(1.0,1.5,1.0)
    tempo3=likelihood1(kernel3, x, x, y, yerr)  # use kernel3 (RatQuadratic), not kernel2, for this timing
temposRQ.append(tempo3)
###########################################################################
start = time() # Calculation using george
kernelg1 = 19**2*ge.kernels.ExpSquaredKernel(2.0**2)
gp = ge.GP(kernelg1)
gp.compute(x,yerr)
gp.lnlikelihood(y)
tempog1=(time() - start)
georgeES.append(tempog1)
start = time() # Calculation using george
kernelg2 = 15.0**2*ge.kernels.ExpSine2Kernel(2/2.0**2,10.0)
gp = ge.GP(kernelg2)
gp.compute(x,yerr)
gp.lnlikelihood(y)
tempog2=(time() - start)
georgeESS.append(tempog2)
start = time() # Calculation using george
kernelg3 = 1.0**2*ge.kernels.RationalQuadraticKernel(1.5,1.0**2)
gp = ge.GP(kernelg3)
gp.compute(x,yerr)
gp.lnlikelihood(y)
tempog3=(time() - start)
georgeRQ.append(tempog3)
#print temposES
#print temposESS
#print temposRQ
N=np.log(pontos)
logES= np.log(temposES)
logESS= np.log(temposESS)
logRQ= np.log(temposRQ)
log_geoES= np.log(georgeES)
log_geoESS= np.log(georgeESS)
log_geoRQ= np.log(georgeRQ)
N2=np.log(pontos)**2
N3=np.log(pontos)**3
pl.plot(N,logES)
pl.plot(N,logESS)
pl.plot(N,logRQ)
#pl.plot(N2,logES)
#pl.plot(N2,logESS)
#pl.plot(N2,logRQ)
#pl.plot(N3,logES)
#pl.plot(N3,logESS)
#pl.plot(N3,logRQ)
pl.plot(N,log_geoES)
pl.plot(N,log_geoESS)
pl.plot(N,log_geoRQ)
pl.xlabel('Points - log(N)')
pl.ylabel('Time - log(s)')
pl.title('Log marginal likelihood calculations')
pl.legend(['ExpSquared', 'ExpSineSquared', 'RatQuadratic', \
'george ES','george ESS','george RQ'], loc='upper left')
| mit |
eyalfa/spark | python/pyspark/sql/tests.py | 1 | 270461 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
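# Illustrative use (values are hypothetical): datetime.datetime(2015, 11, 1, 0, 30,
# tzinfo=UTCOffsetTimezone(2)) represents 00:30 at a fixed UTC+02:00 offset.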
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate a UDT implemented only in Python
"""
__UDT__ = PythonOnlyUDT()
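# Illustrative round trip (hypothetical values): ExamplePointUDT().deserialize(
# ExamplePointUDT().serialize(ExamplePoint(1.0, 2.0))) == ExamplePoint(1.0, 2.0),
# and PythonOnlyUDT behaves the same way for PythonOnlyPoint.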
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes the instance using it has a 'spark' attribute that holds a SparkSession.
    It is usually used with the 'ReusedSQLTestCase' class, but it can be used with any class
    that is guaranteed to provide a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedSQLTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
# This is to check if a 'SQLContext.udf' can call its alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
# This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
# This is separate of UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
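# Schema inference over Rows containing empty lists/dicts and None strings,
# both with the default sampling and with samplingRatio=1.0.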
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
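# Applies an explicit StructType covering byte/short/int/float, date/timestamp,
# map, nested struct, array and a nullable double, then checks SQL arithmetic on it.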
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
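# User-defined types must round-trip through pickle and through the JVM's
# DataType JSON parser, and _infer_type/_make_type_verifier must recognize them.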
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
import pyspark.sql.functions as f
df = self.spark.createDataFrame([(1, 2), (1, 2)])
f_udf = f.udf(lambda: "const_str")
rows = df.distinct().withColumn("a", f_udf()).collect()
self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
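# UDT columns (both the Scala-backed ExamplePointUDT and the Python-only UDT)
# should survive a Parquet write/read round trip.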
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
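# approxQuantile accepts a single column name (str or unicode) or a list/tuple
# of names; results are lists of floats, one list per requested column.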
df = self.sc.parallelize([Row(a=i, b=i + 10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertEqual(row[1], 1)
self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
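# _parse_datatype_string should handle atomic type names, decimal(p, s) with
# arbitrary whitespace, array<>/map<>/struct<> syntax, and bare DDL field lists.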
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
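# trigger() argument validation: it requires exactly one keyword trigger type
# and rejects positional arguments.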
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should not mix multiple trigger types
try:
df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
class ForeachWriterTester:
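# Test helper: runs a streaming query with a foreach writer and records
# open/process/close events as JSON files in temporary directories so the
# tests can read them back and assert on what the writer saw.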
def __init__(self, spark):
self.spark = spark
def write_open_event(self, partitionId, epochId):
self._write_event(
self.open_events_dir,
{'partition': partitionId, 'epoch': epochId})
def write_process_event(self, row):
self._write_event(self.process_events_dir, {'value': 'text'})
def write_close_event(self, error):
self._write_event(self.close_events_dir, {'error': str(error)})
def write_input_file(self):
self._write_event(self.input_dir, "text")
def open_events(self):
return self._read_events(self.open_events_dir, 'partition INT, epoch INT')
def process_events(self):
return self._read_events(self.process_events_dir, 'value STRING')
def close_events(self):
return self._read_events(self.close_events_dir, 'error STRING')
def run_streaming_query_on_writer(self, writer, num_files):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
for i in range(num_files):
self.write_input_file()
sq.processAllAvailable()
finally:
self.stop_all()
def assert_invalid_writer(self, writer, msg=None):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
self.write_input_file()
sq.processAllAvailable()
self.fail("invalid writer %s did not fail the query" % str(writer)) # not expected
except Exception as e:
if msg:
assert msg in str(e), "%s not in %s" % (msg, str(e))
finally:
self.stop_all()
def stop_all(self):
for q in self.spark._wrapped.streams.active:
q.stop()
def _reset(self):
self.input_dir = tempfile.mkdtemp()
self.open_events_dir = tempfile.mkdtemp()
self.process_events_dir = tempfile.mkdtemp()
self.close_events_dir = tempfile.mkdtemp()
def _read_events(self, dir, json):
rows = self.spark.read.schema(json).json(dir).collect()
dicts = [row.asDict() for row in rows]
return dicts
def _write_event(self, dir, event):
import uuid
with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
f.write("%s\n" % str(event))
def __getstate__(self):
return (self.open_events_dir, self.process_events_dir, self.close_events_dir)
def __setstate__(self, state):
self.open_events_dir, self.process_events_dir, self.close_events_dir = state
def test_streaming_foreach_with_simple_function(self):
tester = self.ForeachWriterTester(self.spark)
def foreach_func(row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(foreach_func, 2)
self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partitionId, epochId):
tester.write_open_event(partitionId, epochId)
return True
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
open_events = tester.open_events()
self.assertEqual(len(open_events), 2)
self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})
self.assertEqual(len(tester.process_events()), 2)
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return False
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return True
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)  # open events are still recorded
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_without_open_and_close_methods(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise Exception("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException as e:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
# TODO: Verify whether original error message is inside the exception
def test_streaming_foreach_with_invalid_writers(self):
tester = self.ForeachWriterTester(self.spark)
def func_with_iterator_input(iter):
for x in iter:
print(x)
tester.assert_invalid_writer(func_with_iterator_input)
class WriterWithoutProcess:
def open(self, partition):
pass
tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")
class WriterWithNonCallableProcess():
process = True
tester.assert_invalid_writer(WriterWithNonCallableProcess(),
"'process' in provided object is not callable")
class WriterWithNoParamProcess():
def process(self):
pass
tester.assert_invalid_writer(WriterWithNoParamProcess())
# Abstract class for tests below
class WithProcess():
def process(self, row):
pass
class WriterWithNonCallableOpen(WithProcess):
open = True
tester.assert_invalid_writer(WriterWithNonCallableOpen(),
"'open' in provided object is not callable")
class WriterWithNoParamOpen(WithProcess):
def open(self):
pass
tester.assert_invalid_writer(WriterWithNoParamOpen())
class WriterWithNonCallableClose(WithProcess):
close = True
tester.assert_invalid_writer(WriterWithNonCallableClose(),
"'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
self.assertTrue(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise Exception("this should fail the query")
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# Saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
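# _merge_type resolves NullType against concrete types and identical complex
# types, and raises TypeError naming the path of any conflicting field.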
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year, month, day, hour, minute, second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
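# dropna semantics: how='any'/'all', thresh, subset, and thresh taking
# precedence over how when both are given.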
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# with how='all', drop a row only if all of its values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
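# fillna fills only columns whose type matches the fill value (numeric values
# fill both int and double columns, booleans fill boolean columns, strings fill
# string columns) and respects the subset argument.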
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
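# DataFrame.replace in its scalar, list, tuple and dict forms, with and without
# subset, including mixed numeric replacements, booleans, and replacing with None.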
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified: one column is replaced, while another column
# not in the subset stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail if when received unexpected type
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
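# DataFrame.sample validates its arguments: non-numeric fractions or seeds raise
# TypeError, and a negative fraction raises IllegalArgumentException.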
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
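# RDD.toDF accepts a DDL-formatted schema string such as "key: int, value: string";
# field names may differ, but the number of fields must match and types must be compatible.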
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None (SPARK-14761)
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
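# spark.conf supports set/get/unset of runtime configuration, with an optional default
# value returned for keys that are not set.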
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
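# catalog.listTables returns managed tables of the given database plus temporary views,
# and raises AnalysisException for an unknown database.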
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
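# cacheTable/uncacheTable toggle isCached for temporary views and clearCache drops all
# cached tables; unknown tables raise AnalysisException.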
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite versions <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808).
# The empty bytearray is a test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as Python's types. This is necessary because Python's array
# types depend on the C implementation on the machine, so there is no
# machine-independent correspondence between Python's array types and
# Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in Python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in Python 4, and "c" is not supported in Python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make
# sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# The JVM does not have unsigned types, so we need to be very careful to
# make sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys of _array_type_mappings form a complete list of all supported types;
# types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not available in Python 2, so the full list is hardcoded there.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
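# DataFrameWriter.bucketBy/sortBy: the bucketing columns should be recorded in the
# catalog and the saved table should round-trip the original rows.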
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
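# Helper that builds a DataFrame with int, string, boolean, float, date and timestamp
# columns and converts it with toPandas(); used by the to_pandas tests below.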
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test that createDataFrame with a schema accepts a pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
def test_checking_csv_header(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.createDataFrame([[1, 1000], [2000, 2]])\
.toDF('f1', 'f2').write.option("header", "true").csv(path)
schema = StructType([
StructField('f2', IntegerType(), nullable=True),
StructField('f1', IntegerType(), nullable=True)])
df = self.spark.read.option('header', 'true').schema(schema)\
.csv(path, enforceSchema=False)
self.assertRaisesRegexp(
Exception,
"CSV header does not conform to the schema",
lambda: df.collect())
finally:
shutil.rmtree(path)
def test_ignore_column_of_all_nulls(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
["""{"a":null, "b":null, "c":"string"}"""],
["""{"a":null, "b":null, "c":null}"""]])
df.write.text(path)
schema = StructType([
StructField('b', LongType(), nullable=True),
StructField('c', StringType(), nullable=True)])
readback = self.spark.read.json(path, dropFieldIfAllNull=True)
self.assertEquals(readback.schema, schema)
finally:
shutil.rmtree(path)
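# With spark.sql.repl.eagerEval.enabled, __repr__ and _repr_html_ render the DataFrame
# eagerly and honour the truncate and maxNumRows settings; otherwise only the schema
# string is shown and _repr_html_ returns None.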
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
class HiveSparkSubmitTests(SparkSubmitTests):
@classmethod
def setUpClass(cls):
# get a SparkContext to check for availability of Hive
sc = SparkContext('local[4]', cls.__name__)
cls.hive_available = True
try:
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
finally:
# we don't need this SparkContext for the test
sc.stop()
def setUp(self):
super(HiveSparkSubmitTests, self).setUp()
if not self.hive_available:
self.skipTest("Hive is not available.")
def test_hivecontext(self):
# This test checks that HiveContext is using the Hive metastore (SPARK-16224).
# It sets a metastore URL and checks whether a Derby directory is created by
# the Hive metastore. If this Derby directory exists, HiveContext is using
# the Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
# We can't include this test in SQLTests because it stops the class's SparkContext and
# would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
# This test is separate because it's closely related to the session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume the default session is already set in the JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
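# Window functions partitioned by "value" and ordered by "key": aggregates over a rows
# frame plus row_number/rank/dense_rank/ntile, compared against hand-computed results.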
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
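# rowsBetween/rangeBetween should treat -sys.maxsize/sys.maxsize as unbounded
# preceding/following regardless of the platform's sys.maxsize value.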
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
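# Build the pandas DataFrame expected from self.data, coercing the int and float columns
# to the numpy dtypes produced by Arrow so comparisons with toPandas() results match.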
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with warnings.catch_warnings(record=True) as warns:
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting for the 3 hour difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
wrong_schema = StructType(list(reversed(self.schema)))
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list('abcdefgh'))
self.assertEquals(df.schema.fieldNames(), list('abcdefgh'))
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple('abcdefgh'))
self.assertEquals(df.schema.fieldNames(), list('abcdefgh'))
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
# Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
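# Note: .ix below is the legacy pandas indexer (deprecated and later removed in
# newer pandas releases); .loc is the modern equivalent.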
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
# Integers with nulls will get NaNs filled with 0 and will be cast
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
# The daylight saving time transition for Los Angeles in 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, DoubleType
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from py4j.protocol import Py4JJavaError
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
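# A scalar pandas UDF returning random values; marked nondeterministic so Spark
# will not assume repeated evaluations give the same result.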
@property
def nondeterministic_vectorized_udf(self):
from pyspark.sql.functions import pandas_udf
@pandas_udf('double')
def random_udf(v):
import pandas as pd
import numpy as np
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf
from pyspark.rdd import PythonEvalType
import random
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
def test_vectorized_udf_dates(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import date
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
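# Round-trip each expected tz-aware timestamp through Spark's internal representation
# so it is normalized the same way as the value collected from the DataFrame.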
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
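# maxRecordsPerBatch bounds the Arrow batch size, so each pandas Series the UDF
# receives should hold at most 3 records here.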
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
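# internal_value exposes the raw epoch value (nanoseconds) of each timestamp,
# or None for NaT, so the two runs can be compared numerically.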
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting for the 3-hour difference (in nanoseconds) between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf
# The daylight saving time transition for Los Angeles in 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific to
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
import pandas as pd
from pyspark.sql.functions import col, udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
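# f1..f4 add 1, 10, 100 and 1000 respectively, alternating plain Python UDFs and
# pandas UDFs, so each chained expectation below is v plus the sum of the increments.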
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test the same mixed UDFs, chained within single expressions
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
import pandas as pd
from pyspark.sql import Column
from pyspark.sql.functions import udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
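# Fixture: 10 ids, each exploded into values v = 20..29 (one row per id/value pair).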
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id")))
# Different forms of grouped map pandas UDF; the results of these should be the same
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType())),
StructField('v1', DoubleType()),
StructField('v2', LongType())])
udf1 = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_array_type_correct(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_register_grouped_map_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
'SQL_SCALAR_PANDAS_UDF'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
schema = StructType(
[StructField("id", LongType(), True),
StructField("map", MapType(StringType(), IntegerType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
schema = StructType(
[StructField("id", LongType(), True),
StructField("arr_ts", ArrayType(TimestampType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
# The daylight saving time transition for Los Angeles in 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
pdf = df.toPandas()
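# Grouped map UDFs defined with two parameters receive the grouping key as a tuple
# of numpy scalars in the first argument; foo1/foo2/foo3 assert its shape and types.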
def foo1(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
def test_column_order(self):
from collections import OrderedDict
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pdf.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id')
# Function returns a pdf with the required column names, but the column order (built from a dict) may differ from the schema
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use from_items to ensure the pdf column order is different from the schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by the position of the integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "No cast implemented"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
with self.sql_conf({"spark.sql.execution.pandas.groupedMap.assignColumnsByPosition": True}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
import pyspark.sql.functions as F
@F.pandas_udf('key long, col string', F.PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# This self-join was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
F.col('temp0.key') == F.col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_groupby_apply(self):
import pandas as pd
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
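# v1 sums to 45, v2 (v1 + 1) to 55 and v3 (v1 + 2) to 65, hence the expected total of 165.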
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
from pyspark.sql.functions import pandas_udf, array
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
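# For each id the values v are id+20.0 .. id+29.0, so sum(v) = 10*id + 245 and
# avg(v) = id + 24.5, matching the hard-coded expected rows below.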
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
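# All weights in the test data ('w' and lit(1.0)) equal 1.0, so the weighted mean
# reduces to the plain mean used to build the expected results.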
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
from pyspark.sql.types import DoubleType, MapType
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
from pyspark.sql.functions import mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
from pyspark.sql.functions import sum, mean
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
from pyspark.sql.functions import sum, mean
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF in
# groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
from pyspark.sql.functions import lit, sum
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
from pyspark.sql.functions import col, sum
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
from pyspark.sql.functions import sum, lit, col
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
def test_simple(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, percent_rank, mean, max
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
from pyspark.sql.functions import max, min, mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
from pyspark.sql.functions import max, min, rank, col
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean, pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
ow = self.ordered_window
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*Only unbounded window frame is supported.*'):
df.withColumn('mean_v', mean_udf(df['v']).over(ow))
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
| apache-2.0 |
BoolLi/LeapMotionDesignChallenge | infix2postfix.py | 1 | 5605 | """ CSCI204 Stack lab """
from stack import MyStack
import sys
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
stack = MyStack()
RESOLUTION = 30
def printShit():
print "shit"
def translate( expression ):
""" Translates the given simple infix arithmetic expression to postfix
notation. Returns the result as a string.
Examine each number and operator in the input.
If it's a number, add it to the output.
Else if its an operator, handle it
Else it was an error
Empty the stack onto the output
"""
output = ""
emptyStack()
for ch in expression:
if isNumber( ch ) or isVar( ch ):
output += ch
elif ch == '(':
stack.push(ch)
elif ch == ')':
while not stack.isEmpty() and stack.peek() != '(':
output += stack.pop()
if stack.peek() == '(':
stack.pop()
elif stack.peek() == ')':
raise ParensMismatchException()
else:
raise ParensMismatchException()
elif isOperator( ch ):
output += handleOperator( ch )
else:
raise IllegalExpressionException()
output += emptyStack()
return output
def emptyStack():
""" Empties the stack while collecting each element as it is removed.
Returns the collected elements. """
elements = ""
while not stack.isEmpty():
if isOperator(stack.peek()):
elements += stack.pop()
elif stack.peek() == '(':
raise ParensMismatchException()
else:
stack.pop()
return elements
def isNumber( ch ):
""" Is the given character a number? """
return (ch >= '0' and ch <= '9')
def isOperator( ch ):
""" Is the given character an operator? """
return ch == '+' or ch == '-' or ch == '*' or ch == '/' or ch == '^'
def isX( ch ):
""" Is the given character an x?"""
return ch == 'x' or ch == 'X'
def isY( ch ):
""" Is the given character an x?"""
return ch == 'y' or ch == 'Y'
def isVar( ch ):
return isX(ch) or isY(ch)
def handleOperator( operator ):
""" Pops all operators of the same or greater precedence as the given
operator from the stack and then pushes the given operator. """
elements = popHigherPrecedenceOps( operator )
stack.push( operator )
return elements
def popHigherPrecedenceOps( operator ):
""" Pops operators that have precedence >= the given operator. """
elements = ""
while (not stack.isEmpty()) and isTopHigherPrecedence( operator ) and stack.peek() != '(':
elements += stack.pop()
return elements
def isTopHigherPrecedence( operator ):
""" Does the operator on the stack top have precedence >= to the
given operator?
Convert operators into levels of precedence.
Lower levels indicate lower precedence.
Additive operators (+ -) are level 0.
Multiplicative operators (* /) are level 1.
Then compare the level
"""
top = stack.peek()
if operator == '+' or operator == '-':
opLevel = 0
elif operator == '*' or operator == '/':
opLevel = 1
else:
opLevel = 2
if top == '+' or top == '-':
topLevel = 0
elif top == '*' or top == '/':
topLevel = 1
else:
topLevel = 2
return topLevel >= opLevel
def transfer_to_postfix( expression , x, y):
"""computes the answer to a postfix expression
"""
emptyStack()
output = ""
for ch in expression:
if isNumber(ch):
stack.push(str(ch))
elif isX(ch):
stack.push(x)
elif isY(ch):
stack.push(y)
else:
right_operator = float(stack.pop())
left_operator = float(stack.pop())
if ch == '^':
output = left_operator ** right_operator
elif ch == '+':
output = left_operator + right_operator
elif ch == '-':
output = left_operator - right_operator
elif ch == '*':
output = left_operator * right_operator
elif ch == '/':
output = left_operator / right_operator
stack.push(output)
return stack.pop()
class IllegalExpressionException( BaseException ):
pass
class ParensMismatchException( BaseException ):
pass
def main():
print( "Enter 'quit' to end this program" )
begin = int(raw_input( "Enter the beginning of your range: " ));
end = int(raw_input ( "Enter the end of the range: " ));
X = np.linspace(begin, end, RESOLUTION)
Y = np.linspace(begin, end, RESOLUTION)
Z = np.zeros((RESOLUTION,RESOLUTION))
while True:
infix = raw_input( "Enter a expression for Z(x,y): " )
if infix == "quit": sys.exit()
try:
postfix = translate( infix )
print( infix + " ==> " + postfix )
break
except IllegalExpressionException:
print("Error. Illegal operator.")
except ParensMismatchException:
print("Error. Mismatsched parenthesis.")
for i in range(RESOLUTION):
for j in range(RESOLUTION):
Z[i][j] = transfer_to_postfix( postfix, X[i], Y[j] )
X,Y = np.meshgrid(X,Y)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap = "Oranges_r", linewidth=0, antialiased=True)
plt.show()
| mit |
Urinx/Machine_Learning | BP-Neural-Networks/BP_Neural_Network.py | 1 | 4609 | #! /usr/bin/env python
# coding:utf-8
#######################################
# Handwritten Digit (0-9) Recognizion #
#######################################
from numpy import *
import numpy as np
from matplotlib.pyplot import *
from pylab import *
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin_cg
from scipy.io import loadmat
class ML():
def __init__(self,x=[],y=[]):
self.X=x
self.Y=y
self.Theta=[]
self.Alpha=0.01
self.Iterations=50
self.Lambda=1
def load(self,fname,d=','):
data=loadtxt(fname,delimiter=d)
self.X=data[:,:-1]
self.Y=data[:,-1:]
def loadMat(self,fname):
return loadmat(fname)
def initXY(self,data):
m=data.shape[0]
x=hstack((ones((m,1)),data))
return x,self.Y,m
    def Normalization(self, data):
mu=mean(data,0)
sigma=std(data,0)
data_Norm=(data-mu)/sigma
return data_Norm,mu,sigma
def sigmoid(self,z):
return 1/(1+exp(-z))
def sigmoidGradient(self,z):
return self.sigmoid(z)*(1-self.sigmoid(z))
def J(self):
pass
def predict(self,x):
return array([1]+x).dot(self.Theta)
def evaluate(self):
pass
def mapFeature(self,data,k):
x1,x2=data[:,0:1],data[:,1:]
m=x1.shape[0]
x=ones((m,1))
for i in xrange(1,k+1):
for j in xrange(i+1):
x=hstack((x,x1**j+x2**(i-j)))
return x
def addOne(self,x):
m=x.shape[0]
one=ones((m,1))
return hstack((one,x))
def plot(self):
pass
def show(self):
show()
class BP_Neural_Network(ML):
def __init__(self,fname,x=[],y=[]):
self.Lambda=1
self.Input_Layer_Size=400
self.Hidden_Layer_Size=25
self.Output_Layer_Size=10
mat=self.loadMat(fname)
self.X=mat['X']
self.Y=mat['y']
        # Because the number 0 in y is labeled as 10
self.Y[self.Y==10]=0
i,h,o=self.layerSize()
self.Y_k=eye(o)[self.Y][:,0]
total=(i+1)*h+(h+1)*o
self.Theta_Grad=0.2*ones((1,total)).flatten()
def layerSize(self):
return self.Input_Layer_Size,self.Hidden_Layer_Size,self.Output_Layer_Size
def feedForward(self,theta,x):
m=x.shape[0]
i,h,o=self.layerSize()
theta1=theta[:h*(1+i)].reshape(h,1+i)
theta2=theta[h*(1+i):].reshape(o,1+h)
a1=self.addOne(x)
z2=a1.dot(theta1.T)
a2=self.addOne(self.sigmoid(z2))
z3=a2.dot(theta2.T)
a3=self.sigmoid(z3)
return m,i,h,o,theta1,theta2,a1,a2,a3,z2,z3
def J(self,theta,x,y,cal_grad=0):
lada=self.Lambda
m,i,h,o,theta1,theta2,a1,a2,a3,z2,z3=self.feedForward(theta,x)
j=sum(-y*log(a3)-(1-y)*log(1-a3))/m
j+=lada*(sum(theta1[:,1:]**2)+sum(theta2[:,1:]**2))/(2*m)
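        # Regularised cross-entropy cost:
        # J = (1/m) * sum(-y*log(a3) - (1-y)*log(1-a3))
        #     + (lambda/(2m)) * (sum(theta1^2) + sum(theta2^2)), bias columns excluded.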
# Calculate The Gradient
if cal_grad:
delta3=a3-y
delta2=delta3.dot(theta2[:,1:])*self.sigmoidGradient(z2)
Delta2=delta3.T.dot(a2)
Delta1=delta2.T.dot(a1)
r1=lada*theta1/m
r2=lada*theta2/m
r1[:,0]=0
r2[:,0]=0
theta1_grad=(Delta1/m+r1).flatten()
theta2_grad=(Delta2/m+r2).flatten()
self.Theta_Grad=hstack((theta1_grad,theta2_grad))
# End
return j
def gradient(self,theta):
return self.Theta_Grad
def randInitialWeights(self,L_in,L_out):
epsilon_init=sqrt(6)/sqrt(L_in+L_out)
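        # Weights are drawn uniformly from [-epsilon_init, epsilon_init];
        # sqrt(6/(L_in + L_out)) is a common symmetry-breaking range for sigmoid layers.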
W=np.random.rand(L_out,1+L_in)*2*epsilon_init-epsilon_init
return W
def minJ(self):
i,h,o=self.layerSize()
init_theta1=self.randInitialWeights(i,h)
init_theta2=self.randInitialWeights(h,o)
initial_theta=hstack((init_theta1.flatten(),init_theta2.flatten()))
args=(self.X,self.Y_k,1)
j=lambda theta: self.J(theta,*args)
self.Theta=fmin_cg(j,initial_theta,fprime=self.gradient,maxiter=50)
def predict(self):
m,i,h,o,theta1,theta2,a1,a2,a3,z2,z3=self.feedForward(self.Theta,self.X)
p=argmax(z3,axis=1).reshape(m,1)
accuracy=array(p==self.Y,dtype=int).sum()*1./m
print 'Training Set Accuracy:',accuracy
def terminalPlot(self):
x=self.X
for n in xrange(0,x.shape[0],500):
num=x[n,:].reshape((20,20)).T
for line in num:
s=''
for i in line:
if abs(i)<0.1: s+='0'
else: s+='1'
print s
def checkNNGradients(self):
'''
In the function checkNNGradients, our code creates a small
random model and dataset which is used with computeNumericalGradient
for gradient checking. Furthermore, after you are confident
that your gradient computations are correct, you should turn
off gradient checking before running your learning algorithm.
'''
pass
def computeNumbericalGradient(self):
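        # Central-difference check: the i-th partial derivative is approximated
        # as (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2*eps), with eps = 1e-4.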
epsilon=1e-4
x=self.X
y=self.Y_k
theta=self.Theta
m=len(theta)
f=[]
for i in xrange(m):
tmp=zeros(m)
tmp[i]=epsilon
f.append((self.J(theta+tmp,x,y)-self.J(theta-tmp,x,y))/(2*epsilon))
return f
if __name__=='__main__':
test=BP_Neural_Network('ex4data1.mat')
#test.terminalPlot()
test.minJ()
test.predict() | gpl-2.0 |
mblaauw/Kaggle_CatsVsDogs | cats_vs_dogs.py | 1 | 5074 | __author__ = 'MICH'
import os
import cv2
import numpy as np
from scipy.sparse import lil_matrix
from scipy.stats import expon
from sklearn.decomposition import RandomizedPCA
from sklearn import cross_validation
from sklearn import svm
from sklearn import metrics
from time import time
from sklearn.grid_search import RandomizedSearchCV
wd = 'C:/03_P-PROJECTS/Kaggle_CatsVsDogs/' #change this to make the code work
dataTrainDir = 'C:/03_P-PROJECTS/Kaggle_CatsVsDogs/Data/Data/train/'
dataTestDir = 'C:/03_P-PROJECTS/Kaggle_CatsVsDogs/Data/test/'
os.chdir(wd)
labels = ['cat.', 'dog.']
desiredDimensions = [30, 30]
#define loading and pre-processing function grayscale
def preprocessImg(animal, number, dim1, dim2, dataDir):
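    # Load the image, convert it to grayscale, subtract its mean intensity,
    # resize to (dim1, dim2) and return it flattened as a 1 x (dim1*dim2) row vector.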
imageName = '{0:s}{1:s}{2:d}{3:s}'.format(dataDir, animal, number, '.jpg')
npImage = cv2.imread(imageName)
npImage = cv2.cvtColor(npImage, cv2.COLOR_BGR2GRAY)
    avg = np.mean(npImage.reshape(1, npImage.shape[0] * npImage.shape[1]))
    avg = np.tile(avg, (npImage.shape[0], npImage.shape[1]))
npImage = npImage - avg
npImage = cv2.resize(npImage, (dim1, dim2))
return(npImage.reshape(1, dim1 * dim2))
#m = 1000 #pet Train dataset
m = 12500 #full Train dataset
mTest = 12500 #number of images in the test set
indexesIm = np.random.permutation(m * len(labels))
idxImages = np.tile(range(m), len(labels))
idxImages = idxImages[indexesIm]
testIndexes = range(len(indexesIm), len(indexesIm) + mTest)
y = np.append(np.tile(0, m), np.tile(1, m))
y = y[indexesIm]
def animalInput(theNumber):
if theNumber == 0:
return 'cat.'
elif theNumber == 1:
return 'dog.'
else:
return ''
#Build the sparse matrix with the preprocessed image data for both train and test data
bigMatrix = lil_matrix((len(indexesIm) + len(testIndexes), desiredDimensions[0] * desiredDimensions[1]))
for i in range(len(indexesIm)):
bigMatrix[i, :] = preprocessImg(animalInput(y[i]), idxImages[i], desiredDimensions[0], desiredDimensions[1], dataTrainDir)
someNumbers = range(mTest)
for ii in someNumbers:
bigMatrix[testIndexes[ii], :] = preprocessImg(animalInput('printNothing'), ii + 1, desiredDimensions[0], desiredDimensions[1], dataTestDir)
#Transform to csr matrix
bigMatrix = bigMatrix.tocsr()
#Reduce features to main components so that they contain 99% of variance
pca = RandomizedPCA(n_components=150, whiten = True)
pca.fit(bigMatrix)
varianceExplained = pca.explained_variance_ratio_
print(pca.explained_variance_ratio_)
def anonFunOne(vector):
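    # Returns the index of the principal component at which the cumulative
    # explained variance first exceeds 99%.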
variance = 0
for ii in range(len(vector)):
variance += vector[ii]
if variance > 0.99:
componentIdx = ii
return(componentIdx)
break
pca = RandomizedPCA(n_components=150, whiten = True)
BigMatrixReduced = pca.fit_transform(bigMatrix, y = anonFunOne(varianceExplained))
#Divide train Matrix and Test Matrix (for which I don't have labels)
trainMatrixReduced = BigMatrixReduced[0:max(indexesIm), :]
testMatrixReduced = BigMatrixReduced[testIndexes[0]:BigMatrixReduced.shape[0], :]
#Divide dataset for cross validation purposes
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
trainMatrixReduced, y[0:24999], test_size=0.4, random_state=0) #fix this
#random grid search of hiperparameters
#create a classifier
clf = svm.SVC(verbose = True)
# specify parameters and distributions to sample from
params2Test = {'C': expon(scale=100), 'gamma': expon(scale=.1),
'kernel': ['rbf'], 'class_weight':['auto']}
#run randomized search
n_iter_search = 5
random_search = RandomizedSearchCV(clf, param_distributions = params2Test, n_iter = n_iter_search)
start = time()
random_search.fit(X_train, y_train)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
type(random_search.grid_scores_)
#Machine Learning part
#Support vector machine model
clf.fit(X_train, y_train)
#prediction
predictionFromDataset = clf.predict(X_test)
correctValues = sum(predictionFromDataset == y_test)
percentage = float(correctValues)/len(y_test)
print(percentage)
#prediction probability
predictionFromDataset2 = clf.predict_proba(X_test)
predictionFromDataset2 = predictionFromDataset2[:, 1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, predictionFromDataset2)
predictionProbability = metrics.auc(fpr, tpr)
#Predict images from the test set
#Train the model with full data set
clf = svm.SVC(verbose = True)
clf.fit(trainMatrixReduced, y[0:24999]) #fix this
#Prediction
#predictionFromTest = clf.predict_proba(testMatrixReduced)
predictionFromTest = clf.predict(testMatrixReduced)
#label = predictionFromTest[:, 1]
idVector = range(1, mTest + 1)
#predictionsToCsv = np.column_stack((idVector, label))
predictionsToCsv = np.column_stack((idVector, predictionFromTest))
import csv
ofile = open('predictionIII.csv', "wb")
fileToBeWritten = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for row in predictionsToCsv:
fileToBeWritten.writerow(row)
ofile.close() | mit |
madjelan/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on new, non-corrupt data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
           title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
arboreus/exercises | nn_test_part2.py | 1 | 5979 | #%% Clear variable list
def clearall():
"""clear all globals"""
for uniquevar in [var for var in globals().copy() if var[0] != "_" and var != 'clearall']:
del globals()[uniquevar]
clearall()
#%%
# Python imports
import numpy as np # Matrix and vector computation package
np.seterr(all='ignore') # ignore numpy warning like multiplication of inf
import matplotlib.pyplot as plt # Plotting library
from matplotlib.colors import colorConverter, ListedColormap # some plotting functions
from matplotlib import cm # Colormaps
#import seaborn as sns
# Allow matplotlib to plot inside this notebook
#%matplotlib inline
# Set the seed of the numpy random number generator so that the tutorial is reproducable
np.random.seed(seed=1)
#%%
# Define and generate the samples
nb_of_samples_per_class = 20 # The number of samples in each class
red_mean = [-1,0] # The mean of the red class
blue_mean = [1,0] # The mean of the blue class
std_dev = 1.2 # standard deviation of both classes
# Generate samples from both classes
x_red = np.random.randn(nb_of_samples_per_class, 2) * std_dev + red_mean
x_blue = np.random.randn(nb_of_samples_per_class, 2) * std_dev + blue_mean
# Merge samples in set of input variables x, and corresponding set of output variables t
X = np.vstack((x_red, x_blue))
t = np.vstack((np.zeros((nb_of_samples_per_class,1)), np.ones((nb_of_samples_per_class,1))))
plt.plot(x_red[:,0], x_red[:,1], 'ro', label='class red')
plt.plot(x_blue[:,0], x_blue[:,1], 'bo', label='class blue')
plt.grid()
plt.legend(loc=2)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15)
plt.axis([-4, 4, -4, 4])
plt.title('red vs blue classes in the input space')
plt.show()
#%%
# Define the logistic function
def logistic(z): return 1 / (1 + np.exp(-z))
# Define the neural network function y = 1 / (1 + numpy.exp(-x*w))
def nn(x, w): return logistic(x.dot(w.T))
# Define the neural network prediction function that only returns
# 1 or 0 depending on the predicted class
def nn_predict(x,w): return np.around(nn(x,w))
# Define the cost function
def cost(y, t):
return - np.sum(np.multiply(t, np.log(y)) + np.multiply((1-t), np.log(1-y)))
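# Cross-entropy cost over all samples: xi(t, y) = -sum_i [ t_i*log(y_i) + (1 - t_i)*log(1 - y_i) ]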
# Plot the cost in function of the weights
# Define a vector of weights for which we want to plot the cost
nb_of_ws = 100 # compute the cost nb_of_ws times in each dimension
ws1 = np.linspace(-5, 5, num=nb_of_ws) # weight 1
ws2 = np.linspace(-5, 5, num=nb_of_ws) # weight 2
ws_x, ws_y = np.meshgrid(ws1, ws2) # generate grid
cost_ws = np.zeros((nb_of_ws, nb_of_ws)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
for i in range(nb_of_ws):
for j in range(nb_of_ws):
cost_ws[i,j] = cost(nn(X, np.asmatrix([ws_x[i,j], ws_y[i,j]])) , t)
# Plot the cost function surface
plt.contourf(ws_x, ws_y, cost_ws, 20, cmap=cm.pink)
cbar = plt.colorbar()
cbar.ax.set_ylabel('$\\xi$', fontsize=15)
plt.xlabel('$w_1$', fontsize=15)
plt.ylabel('$w_2$', fontsize=15)
plt.title('Cost function surface')
plt.grid()
plt.show()
#%%
# define the gradient function.
def gradient(w, x, t): return (nn(x, w) - t).T * x
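# This is the cross-entropy gradient d(xi)/d(w) = sum_i (y_i - t_i) * x_i,
# written as a (1 x m) by (m x 2) matrix product.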
# define the update function delta w which returns the
# delta w for each weight in a vector
def delta_w(w_k, x, t, learning_rate):
return learning_rate * gradient(w_k, x, t)
# Set the initial weight parameter
w = np.asmatrix([-4, -2])
# Set the learning rate
learning_rate = 0.05
# Plot the error surface
plt.contourf(ws_x, ws_y, cost_ws, 20, alpha=0.6, cmap=cm.pink)
cbar = plt.colorbar()
cbar.ax.set_ylabel('cost')
# Start the gradient descent updates and plot the iterations
nb_of_iterations = 3 # number of gradient descent updates
for i in range(nb_of_iterations):
dw = delta_w(w, X, t, learning_rate) # get the delta w update
# Plot the weight-cost value and the line that represents the update
plt.plot(w[0,0], w[0,1], 'ko') # Plot the weight cost value
w_new = w-dw # update the weights
plt.plot([w[0,0], w_new[0,0]], [w[0,1], w_new[0,1]], 'k-')
plt.text(w[0,0]-0.2, w[0,1]+0.4, '$w({})$'.format(i), color='k')
w = w_new # set the weight to the updated weights
# Plot the last weight, axis, and show figure
plt.plot(w[0,0], w[0,1], 'ko')
plt.text(w[0,0]-0.2, w[0,1]+0.4, '$w({})$'.format(nb_of_iterations), color='k')
plt.xlabel('$w_1$', fontsize=15)
plt.ylabel('$w_2$', fontsize=15)
plt.title('Gradient descent updates on cost surface')
plt.grid()
plt.show()
#%%
# Set the initial weight parameter
w = np.asmatrix([-4, -2])
# Set the learning rate
learning_rate = 0.05
# Start performing the gradient descent updates, and print the weights and cost:
nb_of_iterations = 10 # number of gradient descent updates
for i in range(nb_of_iterations):
dw = delta_w(w, X, t, learning_rate) # get the delta w update
w = w - dw # update the current weight parameter
# Plot the resulting decision boundary
# Generate a grid over the input space to plot the color of the
# classification at that grid point
nb_of_xs = 200
xs1 = np.linspace(-4, 4, num=nb_of_xs)
xs2 = np.linspace(-4, 4, num=nb_of_xs)
xx, yy = np.meshgrid(xs1, xs2) # create the grid
# Initialize and fill the classification plane
classification_plane = np.zeros((nb_of_xs, nb_of_xs))
for i in range(nb_of_xs):
for j in range(nb_of_xs):
classification_plane[i,j] = nn_predict(np.asmatrix([xx[i,j], yy[i,j]]) , w)
# Create a color map to show the classification colors of each grid point
cmap = ListedColormap([
colorConverter.to_rgba('r', alpha=0.30),
colorConverter.to_rgba('b', alpha=0.30)])
# Plot the classification plane with decision boundary and input samples
plt.contourf(xx, yy, classification_plane, cmap=cmap)
plt.plot(x_red[:,0], x_red[:,1], 'ro', label='target red')
plt.plot(x_blue[:,0], x_blue[:,1], 'bo', label='target blue')
plt.grid()
plt.legend(loc=2)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15)
plt.title('red vs blue classification boundary')
plt.show()
#%%
| mit |
chrsrds/scikit-learn | examples/linear_model/plot_theilsen.py | 76 | 3848 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
# #############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
# #############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause |
ezekial4/atomic_neu | examples/rate_equations_diffusion.py | 1 | 1344 | # This example demonstrates the RateEquationsWithDiffusion class.
# This solves the rate equations for the ionisation stages of
# a given species evolving at different constant temperatures and
# fixed density. There is diffusion of all ionisation stages out of the system,
# and replacement by neutrals, such that the total number of
# ions stays fixed over time at 1.0.
#
# plots are shown of the time history of the stages at roughly 10eV,
# and also of the final state (after 1 second) of the ions for the various temperatures.
#
import numpy as np
import matplotlib.pyplot as plt
import atomic
ad = atomic.element('carbon')
temperature = np.logspace(0, 3, 100)
density = 1e19
tau = 1e-3
times = np.logspace(-7, 0, 120)
times -= times[0]
rt = atomic.time_dependent_rates.RateEquationsWithDiffusion(ad)
yy = rt.solve(times, temperature, density, tau)
# time evolution of ionisation states at 10eV
y_fixed_temperature = yy.at_temperature(10) # has shape (nTimes, nuclear_charge+1) = (120,7)
fig = plt.figure(1)
plt.clf()
ax = fig.add_subplot(111)
lines_ref = ax.semilogx(times, y_fixed_temperature)
ax.set_xlabel(r'$t\ [\mathrm{s}]$')
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=0)
plt.draw()
# fractional abundances at various temperatures at the last timestep.
plt.figure()
frab = yy.abundances[-1]
frab.plot_vs_temperature()
plt.show()
| mit |
manahl/mdf | mdf/tests/test_regression.py | 3 | 1840 | """
Unit tests for regression testing
"""
import unittest
import os
import pandas as pa
from datetime import datetime
import mdf.regression
from mdf import evalnode
@evalnode
def pid_test():
return os.getpid()
# used in test_regression_remote_server_init_func
startup_data = {"cfg":{"paramA":"A"}}
def remote_server_init_func(startup_data):
"""
startup_data is a dict constructed by _start_pyro_subprocess
which will be passed to this callback function on the remote process.
startup_data will contain additional startup_data passed to mdf.regression.[get_contexts|run]
"""
_cfg = startup_data["cfg"]
assert _cfg["paramA"], "A"
class RemoteTest(unittest.TestCase):
def test_regression_contexts(self):
"""
simple test that creates two subprocesses and checks the
pids are different
"""
lhs, rhs = mdf.regression.get_contexts(None, None)
# test the pids for the two contexts are different
lhs_pid = lhs.get_value(pid_test)
rhs_pid = rhs.get_value(pid_test)
self.assertNotEqual(lhs_pid, rhs_pid)
    def test_regression_remote_server_init_func(self):
"""
        simple test that creates two subprocesses using an init_func and
        startup_data for remote server initialisation
"""
lhs, rhs = mdf.regression.get_contexts(None, None,
init_func=remote_server_init_func,
startup_data=startup_data)
def test_df_differ(self):
"""
tests the DataFrameDiffer
"""
date_range = pa.bdate_range(datetime.now(), periods=10)
df_differ = mdf.regression.DataFrameDiffer([pid_test])
diffs = mdf.regression.run(date_range, [df_differ], lhs=None, rhs=None)
self.assertTrue(diffs[0][0])
| mit |
jreback/pandas | pandas/tests/series/indexing/test_setitem.py | 1 | 12737 | from datetime import date
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
class TestSetitemDT64Values:
def test_setitem_none_nan(self):
series = Series(date_range("1/1/2000", periods=10))
series[3] = None
assert series[3] is NaT
series[3:5] = None
assert series[4] is NaT
series[5] = np.nan
assert series[5] is NaT
series[5:7] = np.nan
assert series[6] is NaT
def test_setitem_multiindex_empty_slice(self):
# https://github.com/pandas-dev/pandas/issues/35878
idx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
result = Series([1, 2], index=idx)
expected = result.copy()
result.loc[[]] = 0
tm.assert_series_equal(result, expected)
def test_setitem_with_string_index(self):
# GH#23451
ser = Series([1, 2, 3], index=["Date", "b", "other"])
ser["Date"] = date.today()
assert ser.Date == date.today()
assert ser["Date"] == date.today()
def test_setitem_with_different_tz_casts_to_object(self):
# GH#24024
ser = Series(date_range("2000", periods=2, tz="US/Central"))
ser[0] = Timestamp("2000", tz="US/Eastern")
expected = Series(
[
Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"),
Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"),
],
dtype=object,
)
tm.assert_series_equal(ser, expected)
def test_setitem_tuple_with_datetimetz_values(self):
# GH#20441
arr = date_range("2017", periods=4, tz="US/Eastern")
index = [(0, 1), (0, 2), (0, 3), (0, 4)]
result = Series(arr, index=index)
expected = result.copy()
result[(0, 1)] = np.nan
expected.iloc[0] = np.nan
tm.assert_series_equal(result, expected)
class TestSetitemPeriodDtype:
@pytest.mark.parametrize("na_val", [None, np.nan])
def test_setitem_na_period_dtype_casts_to_nat(self, na_val):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
ser[3] = na_val
assert ser[3] is NaT
ser[3:5] = na_val
assert ser[4] is NaT
class TestSetitemScalarIndexer:
def test_setitem_negative_out_of_bounds(self):
ser = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
ser[-11] = "foo"
class TestSetitemSlices:
def test_setitem_slice_float_raises(self, datetime_series):
msg = (
"cannot do slice indexing on DatetimeIndex with these indexers "
r"\[{key}\] of type float"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0] = 0
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0] = 0
class TestSetitemBooleanMask:
def test_setitem_boolean(self, string_series):
mask = string_series > string_series.median()
# similar indexed series
result = string_series.copy()
result[mask] = string_series * 2
expected = string_series * 2
tm.assert_series_equal(result[mask], expected[mask])
# needs alignment
result = string_series.copy()
result[mask] = (string_series * 2)[0:5]
expected = (string_series * 2)[0:5].reindex_like(string_series)
expected[-mask] = string_series[mask]
tm.assert_series_equal(result[mask], expected[mask])
def test_setitem_boolean_corner(self, datetime_series):
ts = datetime_series
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
msg = (
r"Unalignable boolean Series provided as indexer \(index of "
r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted] = 1
with pytest.raises(IndexingError, match=msg):
ts.loc[mask_shifted] = 1
def test_setitem_boolean_different_order(self, string_series):
ordered = string_series.sort_values()
copy = string_series.copy()
copy[ordered > 0] = 0
expected = string_series.copy()
expected[expected > 0] = 0
tm.assert_series_equal(copy, expected)
@pytest.mark.parametrize("func", [list, np.array, Series])
def test_setitem_boolean_python_list(self, func):
# GH19406
ser = Series([None, "b", None])
mask = func([True, False, True])
ser[mask] = ["a", "c"]
expected = Series(["a", "b", "c"])
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize("value", [None, NaT, np.nan])
def test_setitem_boolean_td64_values_cast_na(self, value):
# GH#18586
series = Series([0, 1, 2], dtype="timedelta64[ns]")
mask = series == series[0]
series[mask] = value
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):
# GH: 26468
ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)
tm.assert_series_equal(ser, expected)
ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
tm.assert_series_equal(ser, expected)
ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
loc_ser = Series(range(4), dtype=any_numeric_dtype)
ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]
tm.assert_series_equal(ser, expected)
class TestSetitemViewCopySemantics:
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz Series inplace invalidates the
# `freq` attribute on the underlying DatetimeIndex
dti = date_range("20130101", periods=3, tz="US/Eastern")
ts = dti[1]
ser = Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti.freq == "D"
ser.iloc[1] = NaT
assert ser._values.freq is None
# check that the DatetimeIndex was not altered in place
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti[1] == ts
assert dti.freq == "D"
def test_dt64tz_setitem_does_not_mutate_dti(self):
# GH#21907, GH#24096
dti = date_range("2016-01-01", periods=10, tz="US/Pacific")
ts = dti[0]
ser = Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert ser._mgr.blocks[0].values is not dti
assert ser._mgr.blocks[0].values._data.base is not dti._data._data.base
ser[::3] = NaT
assert ser[0] is NaT
assert dti[0] == ts
class TestSetitemCallable:
def test_setitem_callable_key(self):
# GH#12533
ser = Series([1, 2, 3, 4], index=list("ABCD"))
ser[lambda x: "A"] = -1
expected = Series([-1, 2, 3, 4], index=list("ABCD"))
tm.assert_series_equal(ser, expected)
def test_setitem_callable_other(self):
# GH#13299
inc = lambda x: x + 1
ser = Series([1, 2, -1, 4])
ser[ser < 0] = inc
expected = Series([1, 2, inc, 4])
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"obj,expected,key",
[
(
# these induce dtype changes
Series([2, 3, 4, 5, 6, 7, 8, 9, 10]),
Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]),
slice(None, None, 2),
),
(
# gets coerced to float, right?
Series([True, True, False, False]),
Series([np.nan, 1, np.nan, 0]),
slice(None, None, 2),
),
(
# these induce dtype changes
Series(np.arange(10)),
Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]),
slice(None, 5),
),
(
# changes dtype GH#4463
Series([1, 2, 3]),
Series([np.nan, 2, 3]),
0,
),
(
# changes dtype GH#4463
Series([False]),
Series([np.nan]),
0,
),
(
# changes dtype GH#4463
Series([False, True]),
Series([np.nan, 1.0]),
0,
),
],
)
class TestSetitemCastingEquivalents:
"""
Check each of several methods that _should_ be equivalent to `obj[key] = np.nan`
We assume that
- obj.index is the default Index(range(len(obj)))
- the setitem does not expand the obj
"""
def test_int_key(self, obj, key, expected, indexer_sli):
if not isinstance(key, int):
return
obj = obj.copy()
indexer_sli(obj)[key] = np.nan
tm.assert_series_equal(obj, expected)
def test_slice_key(self, obj, key, expected, indexer_si):
# Note: no .loc because that handles slice edges differently
obj = obj.copy()
indexer_si(obj)[key] = np.nan
tm.assert_series_equal(obj, expected)
def test_intlist_key(self, obj, key, expected, indexer_sli):
ilkey = list(range(len(obj)))[key]
obj = obj.copy()
indexer_sli(obj)[ilkey] = np.nan
tm.assert_series_equal(obj, expected)
def test_mask_key(self, obj, key, expected, indexer_sli):
# setitem with boolean mask
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
obj = obj.copy()
indexer_sli(obj)[mask] = np.nan
tm.assert_series_equal(obj, expected)
def test_series_where(self, obj, key, expected):
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
obj = obj.copy()
res = obj.where(~mask, np.nan)
tm.assert_series_equal(res, expected)
def test_index_where(self, obj, key, expected, request):
if obj.dtype == bool:
msg = "Index/Series casting behavior inconsistent GH#38692"
mark = pytest.xfail(reason=msg)
request.node.add_marker(mark)
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
res = Index(obj).where(~mask, np.nan)
tm.assert_index_equal(res, Index(expected))
@pytest.mark.xfail(reason="Index/Series casting behavior inconsistent GH#38692")
def test_index_putmask(self, obj, key, expected):
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
res = Index(obj).putmask(mask, np.nan)
tm.assert_index_equal(res, Index(expected))
class TestSetitemWithExpansion:
def test_setitem_empty_series(self):
# GH#10193
key = Timestamp("2012-01-01")
series = Series(dtype=object)
series[key] = 47
expected = Series(47, [key])
tm.assert_series_equal(series, expected)
def test_setitem_empty_series_datetimeindex_preserves_freq(self):
# GH#33573 our index should retain its freq
series = Series([], DatetimeIndex([], freq="D"), dtype=object)
key = Timestamp("2012-01-01")
series[key] = 47
expected = Series(47, DatetimeIndex([key], freq="D"))
tm.assert_series_equal(series, expected)
assert series.index.freq == expected.index.freq
def test_setitem_scalar_into_readonly_backing_data():
# GH#14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
for n in range(len(series)):
msg = "assignment destination is read-only"
with pytest.raises(ValueError, match=msg):
series[n] = 1
assert array[n] == 0
def test_setitem_slice_into_readonly_backing_data():
# GH#14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
msg = "assignment destination is read-only"
with pytest.raises(ValueError, match=msg):
series[1:3] = 1
assert not array.any()
| bsd-3-clause |
andim/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
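# probs[i] is the probability mass the truncated normal assigns to the bin around
# grid[i], i.e. the CDF increment between consecutive bin limits.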
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd=rvs
f,l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
balazssimon/ml-playground | udemy/Deep Learning A-Z/Volume 2 - Unsupervised Deep Learning/Part 4 - Self Organizing Maps (SOM)/ann.py | 2 | 2293 | # Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing Tensorflow
# pip install tensorflow
# Installing Keras
# pip install --upgrade keras
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 - Making predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) | apache-2.0 |
robbymeals/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
timsnyder/bokeh | bokeh/models/sources.py | 1 | 29941 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import warnings
# External imports
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, PandasDataFrame, PandasGroupBy, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_array
from ..util.warnings import BokehUserWarning
from .callbacks import Callback, CustomJS
from .filters import Filter
from .selections import Selection, SelectionPolicy, UnionRenderers
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ServerSentDataSource',
'AjaxDataSource',
'CDSView',
'ColumnarDataSource',
'ColumnDataSource',
'DataSource',
'GeoJSONDataSource',
'RemoteSource',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class DataSource(Model):
''' A base class for data source types.
'''
selected = Instance(Selection, default=lambda: Selection(), help="""
A Selection that indicates selected indices on this ``DataSource``.
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
    .. note::
This property is left for backwards compatibility, but may be deprecated
in the future. Prefer ``source.selected.js_on_change(...)`` for new code.
""")
@abstract
class ColumnarDataSource(DataSource):
''' A base class for data source types, which can be mapped onto
a columnar format.
'''
selection_policy = Instance(SelectionPolicy, default=lambda: UnionRenderers(), help="""
An instance of a ``SelectionPolicy`` that determines how selections are set.
""")
class ColumnDataSource(ColumnarDataSource):
''' Maps names of columns to sequences or arrays.
The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
plots, data tables, etc. will be driven by a ``ColumnDataSource``.
If the ``ColumnDataSource`` initializer is called with a single argument that
can be any of the following:
* A Python ``dict`` that maps string names to sequences of values, e.g.
lists, arrays, etc.
.. code-block:: python
data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
source = ColumnDataSource(data)
.. note::
``ColumnDataSource`` only creates a shallow copy of ``data``. Use e.g.
``ColumnDataSource(copy.deepcopy(data))`` if initializing from another
``ColumnDataSource.data`` object that you want to keep independent.
* A Pandas ``DataFrame`` object
.. code-block:: python
source = ColumnDataSource(df)
In this case the CDS will have columns corresponding to the columns of
the ``DataFrame``. If the ``DataFrame`` columns have multiple levels,
they will be flattened using an underscore (e.g. level_0_col_level_1_col).
The index of the ``DataFrame`` will be flattened to an ``Index`` of tuples
if it's a ``MultiIndex``, and then reset using ``reset_index``. The result
will be a column with the same name if the index was named, or
level_0_name_level_1_name if it was a named ``MultiIndex``. If the
``Index`` did not have a name or the ``MultiIndex`` name could not be
flattened/determined, the ``reset_index`` function will name the index column
``index``, or ``level_0`` if the name ``index`` is not available.
* A Pandas ``GroupBy`` object
.. code-block:: python
group = df.groupby(('colA', 'ColB'))
In this case the CDS will have columns corresponding to the result of
calling ``group.describe()``. The ``describe`` method generates columns
for statistical measures such as ``mean`` and ``count`` for all the
non-grouped original columns. The CDS columns are formed by joining
original column names with the computed measure. For example, if a
    ``DataFrame`` has columns ``'year'`` and ``'mpg'``, then passing
    ``df.groupby('year')`` to a CDS will result in columns such as
    ``'mpg_mean'``.
If the ``GroupBy.describe`` result has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
Note this capability to adapt ``GroupBy`` objects may only work with
Pandas ``>=0.20.0``.
.. note::
There is an implicit assumption that all the columns in a given
``ColumnDataSource`` all have the same length at all times. For this
reason, it is usually preferable to update the ``.data`` property
of a data source "all at once".
'''
data = ColumnData(String, Seq(Any), help="""
    Mapping of column names to sequences of data. The columns can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
The .data attribute can also be set from Pandas DataFrames or GroupBy
objects. In these cases, the behaviour is identical to passing the objects
to the ``ColumnDataSource`` initializer.
""").accepts(
PandasDataFrame, lambda x: ColumnDataSource._data_from_df(x)
).accepts(
PandasGroupBy, lambda x: ColumnDataSource._data_from_groupby(x)
).asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
lambda obj, name, data: warnings.warn(
"ColumnDataSource's columns must be of the same length. " +
"Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
``pandas.DataFrame``, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
raw_data = self._data_from_groupby(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
self.data.update(raw_data)
@property
def column_names(self):
''' A list of the column names in this data source.
'''
return list(self.data)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
_df = df.copy()
# Flatten columns
if isinstance(df.columns, pd.MultiIndex):
try:
_df.columns = ['_'.join(col) for col in _df.columns.values]
except TypeError:
raise TypeError('Could not flatten MultiIndex columns. '
'use string column names or flatten manually')
# Transform columns CategoricalIndex in list
if isinstance(df.columns, pd.CategoricalIndex):
_df.columns = df.columns.tolist()
# Flatten index
index_name = ColumnDataSource._df_index_name(df)
if index_name == 'index':
_df.index = pd.Index(_df.index.values)
else:
_df.index = pd.Index(_df.index.values, name=index_name)
_df.reset_index(inplace=True)
tmp_data = {c: v.values for c, v in _df.iteritems()}
new_data = {}
for k, v in tmp_data.items():
new_data[k] = v
return new_data
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
suitable for creating a ``ColumnDataSource``.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@staticmethod
def _df_index_name(df):
''' Return the Bokeh-appropriate column name for a ``DataFrame`` index
        If there is no named index, then ``"index"`` is returned.
If there is a single named index, then ``df.index.name`` is returned.
If there is a multi-index, and the index names are all strings, then
the names are joined with '_' and the result is returned, e.g. for a
multi-index ``['ind1', 'ind2']`` the result will be "ind1_ind2".
Otherwise if any index name is not a string, the fallback name "index"
is returned.
Args:
df (DataFrame) : the ``DataFrame`` to find an index name for
Returns:
str
'''
if df.index.name:
return df.index.name
elif df.index.names:
try:
return "_".join(df.index.names)
except TypeError:
return "index"
else:
return "index"
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
suitable for creating a ``ColumnDataSource``.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data)
@classmethod
def from_groupby(cls, data):
''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
suitable for creating a ``ColumnDataSource``.
The data generated is the result of running ``describe``
on the group.
Args:
data (Groupby) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas ``DataFrame``.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None):
''' Efficiently update data source columns with new append-only data.
        In cases where it is only necessary to append new data to existing columns, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
# calls internal implementation
self._stream(new_data, rollover)
def _stream(self, new_data, rollover=None, setter=None):
''' Internal implementation to efficiently update data source columns
with new append-only data. The internal implementation adds the setter
attribute. [https://github.com/bokeh/bokeh/issues/6577]
        In cases where it is only necessary to append new data to existing columns, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq] or DataFrame or Series) : a mapping of
column names to sequences of new data to append to each column,
a pandas DataFrame, or a pandas Series in case of a single row -
in this case the Series index is used as column names
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
needs_length_check = True
if pd and isinstance(new_data, pd.Series):
new_data = new_data.to_frame().T
if pd and isinstance(new_data, pd.DataFrame):
needs_length_check = False # DataFrame lengths equal by definition
_df = new_data
newkeys = set(_df.columns)
index_name = ColumnDataSource._df_index_name(_df)
newkeys.add(index_name)
new_data = dict(_df.iteritems())
new_data[index_name] = _df.index.values
else:
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError(
"Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
)
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
import numpy as np
if needs_length_check:
lengths = set()
arr_types = (np.ndarray, pd.Series) if pd else np.ndarray
for k, x in new_data.items():
if isinstance(x, arr_types):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
# slightly awkward that we have to call convert_datetime_array here ourselves
# but the downstream code expects things to already be ms-since-epoch
for key, values in new_data.items():
if pd and isinstance(values, (pd.Series, pd.Index)):
values = values.values
old_values = self.data[key]
# Apply the transformation if the new data contains datetimes
# but the current data has already been transformed
if (isinstance(values, np.ndarray) and values.dtype.kind.lower() == 'm' and
isinstance(old_values, np.ndarray) and old_values.dtype.kind.lower() != 'm'):
new_data[key] = convert_datetime_array(values)
else:
new_data[key] = values
self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
``ColumnDataSource``, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
        In this case the first item of the tuple should be a list or array whose
        first element is the index of the array item in the CDS to patch, and whose
        subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
            The following example shows how to patch entire column elements. In this
            case, the first two values of ``foo`` and the first and third values of
            ``bar`` are replaced:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 12, 30], bar=[101, 200, 301])
            For a more comprehensive example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter)
class CDSView(Model):
''' A view into a ``ColumnDataSource`` that represents a row-wise subset.
'''
filters = List(Instance(Filter), default=[], help="""
List of filters that the view comprises.
""")
source = Instance(ColumnarDataSource, help="""
The ``ColumnDataSource`` associated with this view. Used to determine
the length of the columns.
""")
class GeoJSONDataSource(ColumnarDataSource):
'''
'''
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently
``GeoJSONDataSource`` can only process a ``FeatureCollection`` or
``GeometryCollection``.
""")
@abstract
class WebSource(ColumnDataSource):
''' Base class for web column data sources that can update from data
URLs.
.. note::
This base class is typically not useful to instantiate on its own.
'''
adapter = Instance(CustomJS, help="""
A JavaScript callback to adapt raw JSON responses to Bokeh ``ColumnDataSource``
format.
    If provided, this callback executes immediately after the JSON data is
received, but before appending or replacing data in the data source. The
``CustomJS`` callback will receive the ``AjaxDataSource`` as ``cb_obj`` and
will receive the raw JSON response as ``cb_data.response``. The callback
code should return a ``data`` object suitable for a Bokeh ``ColumnDataSource``
(i.e. a mapping of string column names to arrays of data).
""")
max_size = Int(help="""
Maximum size of the data columns. If a new fetch would result in columns
larger than ``max_size``, then earlier data is dropped to make room.
""")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``), or to
replace existing data entirely.
""")
data_url = String(help="""
    A URL to fetch data from.
""")
@abstract
class RemoteSource(WebSource):
''' Base class for remote column data sources that can update from data
URLs at prescribed time intervals.
.. note::
This base class is typically not useful to instantiate on its own.
'''
polling_interval = Int(help="""
A polling interval (in milliseconds) for updating data source.
""")
class ServerSentDataSource(WebSource):
''' A data source that can populate columns by receiving server sent
events endpoints.
'''
class AjaxDataSource(RemoteSource):
''' A data source that can populate columns by making Ajax calls to REST
endpoints.
The ``AjaxDataSource`` can be especially useful if you want to make a
standalone document (i.e. not backed by the Bokeh server) that can still
dynamically update using an existing REST API.
The response from the REST API should match the ``.data`` property of a
standard ``ColumnDataSource``, i.e. a JSON dict that maps names to arrays
of values:
.. code-block:: python
{
'x' : [1, 2, 3, ...],
'y' : [9, 3, 2, ...]
}
Alternatively, if the REST API returns a different format, a ``CustomJS``
callback can be provided to convert the REST response into Bokeh format,
via the ``adapter`` property of this data source.
A full example can be seen at :bokeh-tree:`examples/howto/ajax_source.py`
'''
method = Enum('POST', 'GET', help="""
Specify the HTTP method to use for the Ajax request (GET or POST)
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in Ajax requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
Specify HTTP headers to set for the Ajax request.
Example:
.. code-block:: python
ajax_source.headers = { 'x-my-custom-header': 'some value' }
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _check_slice(s):
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError("Patch slices must have start < end, got %s" % s)
if (s.start is not None and s.start < 0) or \
(s.stop is not None and s.stop < 0) or \
(s.step is not None and s.step < 0):
raise ValueError("Patch slices must have non-negative (start, stop, step) values, got %s" % s)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
PiRSquared17/stoqs | contrib/analysis/trajectory_biplots.py | 5 | 18414 | #!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2013, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__status__ = "Development"
__doc__ = '''
Script to query the database for measured parameters from the same instantpoint and to
make scatter plots of temporal segments of the data. A simplified trackline of the
trajectory data and the start time of the temporal segment are added to each plot.
Make use of STOQS metadata to make it as simple as possible to use this script for
different platforms, parameters, and campaigns.
Mike McCann
MBARI Dec 6, 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE']='settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) # settings.py is one dir up
import re
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import DAILY
from datetime import datetime, timedelta
from django.contrib.gis.geos import LineString, Point
from utils.utils import round_to_n
from textwrap import wrap
from mpl_toolkits.basemap import Basemap
import matplotlib.gridspec as gridspec
from contrib.analysis import BiPlot, NoPPDataException, NoTSDataException
class PlatformsBiPlot(BiPlot):
'''
Make customized BiPlots (Parameter Parameter plots) for platforms from STOQS.
'''
def ppSubPlot(self, x, y, platform, color, xParm, yParm, ax, startTime):
'''
        Given names of platform, x & y parameters, add a subplot to figure fig.
'''
xmin, xmax, xUnits = self._getAxisInfo(platform, xParm)
ymin, ymax, yUnits = self._getAxisInfo(platform, yParm)
# Make the plot
ax.set_xlim(round_to_n(xmin, 1), round_to_n(xmax, 1))
ax.set_ylim(round_to_n(ymin, 1), round_to_n(ymax, 1))
if self.args.xLabel == '':
ax.set_xticks([])
elif self.args.xLabel:
ax.set_xlabel(self.args.xLabel)
else:
ax.set_xlabel('%s (%s)' % (xParm, xUnits))
if self.args.yLabel == '':
ax.set_yticks([])
elif self.args.yLabel:
ax.set_ylabel(self.args.yLabel)
else:
ax.set_ylabel('%s (%s)' % (yParm, yUnits))
ax.scatter(x, y, marker='.', s=10, c='k', lw = 0, clip_on=True)
ax.text(0.0, 1.0, platform, transform=ax.transAxes, color=color, horizontalalignment='left', verticalalignment='top')
return ax
def timeSubPlot(self, platformDTHash, ax1, allActivityStartTime, allActivityEndTime, startTime, endTime, swrTS):
'''
Make subplot of depth time series for all the platforms and highlight the time range
'''
for pl, ats in platformDTHash.iteritems():
color = self._getColor(pl)
for a, ts in ats.iteritems():
datetimeList = []
depths = []
for ems, d in ts:
datetimeList.append(datetime.utcfromtimestamp(ems/1000.0))
depths.append(d)
ax1.plot_date(matplotlib.dates.date2num(datetimeList), depths, '-', c=color, alpha=0.2)
# Highlight the selected time extent
ax1.axvspan(*matplotlib.dates.date2num([startTime, endTime]), facecolor='k', alpha=0.6)
if self.args.minDepth is not None:
ax1.set_ylim(bottom=self.args.minDepth)
if self.args.maxDepth:
ax1.set_ylim(top=self.args.maxDepth)
ax1.set_ylim(ax1.get_ylim()[::-1])
if swrTS:
# Plot short wave radiometer data
if self.args.verbose: print 'Plotting swrTS...'
ax2 = ax1.twinx()
ax2.plot_date(matplotlib.dates.date2num(swrTS[0]), swrTS[1], '-', c='black', alpha=0.5)
ax2.set_ylabel('SWR (W/m^2)')
plt.locator_params(axis='y', nbins=3)
ax1.set_xlabel('Time (GMT)')
ax1.set_ylabel('Depth (m)')
loc = ax1.xaxis.get_major_locator()
loc.maxticks[DAILY] = 4
return ax1
def spatialSubPlot(self, platformLineStringHash, ax, e, resolution='l'):
'''
Make subplot of tracks for all the platforms within the time range.
'''
m = Basemap(llcrnrlon=e[0], llcrnrlat=e[1], urcrnrlon=e[2], urcrnrlat=e[3], projection='cyl', resolution=resolution, ax=ax)
##m.wmsimage('http://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?', layers=['GEBCO_08_Grid']) # Works, but coarse
m.arcgisimage(server='http://services.arcgisonline.com/ArcGIS', service='Ocean_Basemap')
for pl, LS in platformLineStringHash.iteritems():
x,y = zip(*LS)
m.plot(x, y, '-', c=self._getColor(pl), linewidth=3)
if self.args.mapLabels:
m.drawparallels(np.linspace(e[1],e[3],num=3), labels=[True,False,False,False], linewidth=0)
m.drawmeridians(np.linspace(e[0],e[2],num=3), labels=[False,False,False,True], linewidth=0)
return ax
def getFilename(self, startTime):
'''
Construct plot file name
'''
if self.args.title:
p = re.compile('[\s()]')
fnTempl = p.sub('_', self.args.title) + '_{time}'
else:
fnTempl= 'platforms_{time}'
fileName = fnTempl.format(time=startTime.strftime('%Y%m%dT%H%M'))
wcName = fnTempl.format(time=r'*')
wcName = os.path.join(self.args.plotDir, self.args.plotPrefix + wcName)
if self.args.daytime:
fileName += '_day'
wcName += '_day'
if self.args.nighttime:
fileName += '_night'
wcName += '_night'
fileName += '.png'
fileName = os.path.join(self.args.plotDir, self.args.plotPrefix + fileName)
return fileName, wcName
def makePlatformsBiPlots(self):
'''
Cycle through all the platforms & parameters (there will be more than one) and make the correlation plots
for the interval as subplots on the same page. Include a map overview and timeline such that if a movie
is made of the resulting images a nice story is told. Layout of the plot page is like:
D +-------------------------------------------------------------------------------------------+
e | |
p | |
t | |
h +-------------------------------------------------------------------------------------------+
Time
+---------------------------------------+ +-------------------+-------------------+
| | | | |
| | y | | |
L | | P | | |
a | | a | Platform 0 | Platform 1 |
t | | r | | |
i | | m | | |
t | | | | |
u | | +-------------------+-------------------+
d | | | | |
e | | y | | |
| | P | | |
| | a | Platform 2 | Platform 3 |
| | r | | |
| | m | | |
| | | | |
+---------------------------------------+ +-------------------+-------------------+
Longitude xParm xParm
'''
# Nested GridSpecs for Subplots
outer_gs = gridspec.GridSpec(2, 1, height_ratios=[1,4])
time_gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer_gs[0])
lower_gs = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer_gs[1])
map_gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=lower_gs[0])
plat1_gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=lower_gs[1])
plat4_gs = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=lower_gs[1], wspace=0.0, hspace=0.0, width_ratios=[1,1], height_ratios=[1,1])
# Get overall temporal and spatial extents of platforms requested
allActivityStartTime, allActivityEndTime, allExtent = self._getActivityExtent(self.args.platform)
# Setup the time windowing and stepping - if none specified then use the entire extent that is in the database
if self.args.hourStep:
timeStep = timedelta(hours=self.args.hourStep)
if self.args.hourWindow:
timeWindow = timedelta(hours=self.args.hourWindow)
else:
if self.args.hourStep:
timeWindow = timedelta(hours=self.args.hourStep)
else:
timeWindow = allActivityEndTime - allActivityStartTime
timeStep = timeWindow
startTime = allActivityStartTime
endTime = startTime + timeWindow
# Get overall temporal data for placement in the temporal subplot
platformDTHash = self._getplatformDTHash(self.args.platform)
try:
swrTS = self._getTimeSeriesData(allActivityStartTime, allActivityEndTime, parameterStandardName='surface_downwelling_shortwave_flux_in_air')
except NoTSDataException, e:
swrTS = None
print "WARNING:", e
# Loop through sections of the data with temporal query constraints based on the window and step command line parameters
while endTime <= allActivityEndTime:
# Start a new figure - size is in inches
fig = plt.figure(figsize=(9, 6))
# Plot temporal overview
ax = plt.Subplot(fig, time_gs[:])
fig.add_subplot(ax)
if self.args.title:
ax.set_title(self.args.title)
self.timeSubPlot(platformDTHash, ax, allActivityStartTime, allActivityEndTime, startTime, endTime, swrTS)
# Make scatter plots of data from the platforms
platformLineStringHash = {}
for i, (pl, xP, yP) in enumerate(zip(self.args.platform, self.args.xParm, self.args.yParm)):
try:
if self.args.verbose: print 'Calling self._getMeasuredPPData...'
x, y, points = self._getMeasuredPPData(startTime, endTime, pl, xP, yP)
platformLineStringHash[pl] = LineString(points).simplify(tolerance=.001)
except NoPPDataException, e:
if self.args.verbose: print e
x, y = ([], [])
if len(self.args.platform) == 1:
ax = plt.Subplot(fig, plat1_gs[0])
elif len(self.args.platform) < 5:
ax = plt.Subplot(fig, plat4_gs[i])
else:
raise Exception('Cannot handle more than 4 platform Parameter-Parameter plots')
fig.add_subplot(ax)
self.ppSubPlot(x, y, pl, self._getColor(pl), xP, yP, ax, startTime)
# Plot spatial
ax = plt.Subplot(fig, map_gs[:])
fig.add_subplot(ax, aspect='equal')
if self.args.verbose: print 'Calling self.spatialSubPlot()...'
self.spatialSubPlot(platformLineStringHash, ax, allExtent)
startTime = startTime + timeStep
endTime = startTime + timeWindow
provStr = 'Created with STOQS command ' + '\\\n'.join(wrap(self.commandline, width=100)) + ' on ' + datetime.now().ctime() + ' GMT'
plt.figtext(0.0, 0.0, provStr, size=7, horizontalalignment='left', verticalalignment='bottom')
fileName, wcName = self.getFilename(startTime)
print 'Saving to file', fileName
fig.savefig(fileName)
plt.clf()
plt.close()
##raw_input('P')
print 'Done.'
print 'Make an animated gif with: convert -delay 10 {wcName}.png {baseName}.gif'.format(wcName=wcName, baseName='_'.join(fileName.split('_')[:-1]))
print 'Make an MPEG 4 with: ffmpeg -r 10 -i {baseName}.gif -vcodec mpeg4 -qscale 1 -y {baseName}.mp4'.format(baseName='_'.join(fileName.split('_')[:-1]))
print 'On a Mac open the .mp4 file in QuickTime Player and export the file for "iPad, iPhone & Apple TV" (.m4v format) for best portability.'
def process_command_line(self):
'''
The argparse library is included in Python 2.7 and is an added package for STOQS.
'''
import argparse
from argparse import RawTextHelpFormatter
examples = 'Examples:' + '\n\n'
examples += sys.argv[0] + " -d stoqs_september2013 -p tethys Slocum_294 daphne Slocum_260 -x bb650 optical_backscatter660nm bb650 optical_backscatter700nm -y chlorophyll fluorescence chlorophyll fluorescence --plotDir /tmp --plotPrefix stoqs_september2013_ --hourStep 1 --hourWindow 2 --xLabel '' --yLabel '' --title 'Fl vs. bb (red)' --minDepth 0 --maxDepth 100\n"
examples += sys.argv[0] + ' -d stoqs_september2013_o -p dorado Slocum_294 tethys -x bbp420 optical_backscatter470nm bb470 -y fl700_uncorr fluorescence chlorophyll\n'
examples += sys.argv[0] + ' -d stoqs_march2013_o -p daphne tethys -x bb470 bb470 -y chlorophyll chlorophyll --hourStep 6 --hourWindow 12\n'
examples += '\n\nMultiple platform and parameter names are paired up in respective order.\n'
examples += '\nIf running from cde-package replace ".py" with ".py.cde" in the above list.'
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description='Read Parameter-Parameter data from a STOQS database and make bi-plots',
epilog=examples)
parser.add_argument('-x', '--xParm', action='store', help='One or more Parameter names for the X axis', nargs='*', default='bb470', required=True)
parser.add_argument('-y', '--yParm', action='store', help='One or more Parameter names for the Y axis', nargs='*', default='chlorophyll', required=True)
parser.add_argument('-p', '--platform', action='store', help='One or more platform names separated by spaces', nargs='*', default='tethys', required=True)
parser.add_argument('-d', '--database', action='store', help='Database alias', default='stoqs_september2013_o', required=True)
parser.add_argument('--hourWindow', action='store', help='Window in hours for interval plot. If not specified it will be the same as hourStep.', type=int)
parser.add_argument('--hourStep', action='store', help='Step though the time series and make plots at this hour interval', type=int)
parser.add_argument('--daytime', action='store_true', help='Select only daytime hours: 10 am to 2 pm local time')
parser.add_argument('--nighttime', action='store_true', help='Select only nighttime hours: 10 pm to 2 am local time')
parser.add_argument('--minDepth', action='store', help='Minimum depth for data queries', default=None, type=float)
parser.add_argument('--maxDepth', action='store', help='Maximum depth for data queries', default=None, type=float)
parser.add_argument('--plotDir', action='store', help='Directory where to write the plot output', default='.')
parser.add_argument('--plotPrefix', action='store', help='Prefix to use in naming plot files', default='')
parser.add_argument('--xLabel', action='store', help='Override Parameter-Parameter X axis label - will be applied to all plots')
parser.add_argument('--yLabel', action='store', help='Override Parameter-Parameter Y axis label - will be applied to all plots')
parser.add_argument('--mapLabels', action='store_true', help='Put latitude and longitude labels and tics on the map')
parser.add_argument('--platformColors', action='store', help='Override database platform colors - put in quotes, e.g. "#ff0000"', nargs='*')
parser.add_argument('--title', action='store', help='Title to appear on top of plot')
parser.add_argument('--extend', action='store', help='Extend the data extent for the map boundaries by this value in degrees', type=float)
parser.add_argument('--extent', action='store', help='Space separated specific map boundary in degrees: ll_lon ll_lat ur_lon ur_lat', nargs=4, default=[])
parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1)
self.args = parser.parse_args()
self.commandline = ""
for item in sys.argv:
if item == '':
# Preserve empty string specifications in the command line
self.commandline += "''" + ' '
else:
self.commandline += item + ' '
if __name__ == '__main__':
bp = PlatformsBiPlot()
bp.process_command_line()
if len(bp.args.platform) > 0:
bp.makePlatformsBiPlots()
else:
bp.makePlatformsBiPlots()
| gpl-3.0 |
mljar/mljar-examples | auto-trading-numerai/main.py | 1 | 2271 | '''
Example of how to make an auto-trading script on Numer.ai data.
It is using MLJAR for model training.
'''
import os
import pandas as pd
import numerapi # API to interact with Numer.ai
from mljar import Mljar # API to build model
# your credentials from numer.ai
NUMERAI_USER = '######your mail#######'
NUMERAI_PASS = '######your pass#######'
# files with Numer.ai data
TRAIN_FNAME = './numerai_training_data.csv'
TEST_FNAME = './numerai_tournament_data.csv'
# file with our predictions
PREDICTIONS_FNAME = './mljar-prediction-raw-data.csv'
def some_tricky_unsupervised_preprocessing(X_train, X_test):
# do some magic here
return X_train, X_test
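# Note: the function above is a placeholder hook for custom feature engineering;
# as written it returns its inputs unchanged and is not called from main() below.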
def main():
# login to numerai to get token
print 'Login into Numer.ai'
api = numerapi.NumerAPI(NUMERAI_USER, NUMERAI_PASS)
# get datasets
print 'Get dataset'
if not os.path.isfile(TRAIN_FNAME):
api.download_current_dataset(dest_path='.', unzip=True)
# read datasets
train = pd.read_csv(TRAIN_FNAME)
test = pd.read_csv(TEST_FNAME)
print 'Numer.ai data downloaded'
print 'Train shape', train.shape, 'test shape', test.shape
X_train = train[train.columns[:50]]
y_train = train['target']
X_test = test
print 'Create MLJAR project and experiment'
models = Mljar(project='Auto-trading', experiment="Raw data",
metric='logloss',
validation_kfolds=5, # we will use 5-fold CV with stratify and shuffle
validation_shuffle=True,
validation_stratify=True,
algorithms=['xgb', 'lgb', 'mlp'], # select Xgboost, LightGBM and Neural Network
tuning_mode='Normal', # number of models to be checked for each algorithm
single_algorithm_time_limit=5) # 5 minutes for training single model
print 'Train models:'
# fit models - that's all, only one line of code ;)
models.fit(X_train, y_train)
# get predictions on test data
predictions = models.predict(X_test)
# save predictions to file
predictions.to_csv(PREDICTIONS_FNAME, index=False)
result = api.upload_prediction(PREDICTIONS_FNAME)
print 'Your score:', result['submission']['accuracy_score']
if __name__=="__main__":
main()
| apache-2.0 |
bsipocz/statsmodels | statsmodels/stats/tests/test_weightstats.py | 30 | 21864 | '''tests for weightstats, compares with replication
no failures but needs cleanup
update 2012-09-09:
added test after fixing bug in covariance
TODOs:
- I don't remember what all the commented out code is doing
- should be refactored to use generator or inherited tests
- still gaps in test coverage
- value/diff in ttest_ind is tested in test_tost.py
- what about pandas data structures?
Author: Josef Perktold
License: BSD (3-clause)
'''
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
from statsmodels.stats.weightstats import \
DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint
#import statsmodels.stats.weightstats as smws
class Holder(object):
pass
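# Holder is a plain attribute container; it is used near the bottom of this
# module to hold reference results transcribed from R's BSDA z.test.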
class TestWeightstats(object):
def __init__(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1)
x2 = m2 + np.random.randn(n2)
x1_2d = m1 + np.random.randn(n1, 3)
x2_2d = m2 + np.random.randn(n2, 3)
w1_ = 2. * np.ones(n1)
w2_ = 2. * np.ones(n2)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.x1_2d, self.x2_2d = x1_2d, x2_2d
def test_weightstats_1(self):
x1, x2 = self.x1, self.x2
w1, w2 = self.w1, self.w2
w1_ = 2. * np.ones(len(x1))
w2_ = 2. * np.ones(len(x2))
d1 = DescrStatsW(x1)
# print ttest_ind(x1, x2)
# print ttest_ind(x1, x2, usevar='unequal')
# #print ttest_ind(x1, x2, usevar='unequal')
# print stats.ttest_ind(x1, x2)
# print ttest_ind(x1, x2, usevar='unequal', alternative='larger')
# print ttest_ind(x1, x2, usevar='unequal', alternative='smaller')
# print ttest_ind(x1, x2, usevar='unequal', weights=(w1_, w2_))
# print stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2])
assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2],
stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2]))
def test_weightstats_2(self):
x1, x2 = self.x1, self.x2
w1, w2 = self.w1, self.w2
d1 = DescrStatsW(x1)
d1w = DescrStatsW(x1, weights=w1)
d2w = DescrStatsW(x2, weights=w2)
x1r = d1w.asrepeats()
x2r = d2w.asrepeats()
# print 'random weights'
# print ttest_ind(x1, x2, weights=(w1, w2))
# print stats.ttest_ind(x1r, x2r)
assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
stats.ttest_ind(x1r, x2r), 14)
#not the same as new version with random weights/replication
# assert x1r.shape[0] == d1w.sum_weights
# assert x2r.shape[0] == d2w.sum_weights
assert_almost_equal(x2r.mean(0), d2w.mean, 14)
assert_almost_equal(x2r.var(), d2w.var, 14)
assert_almost_equal(x2r.std(), d2w.std, 14)
#note: the following is for 1d
assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14)
#assert_almost_equal(np.corrcoef(np.x2r), d2w.corrcoef, 19)
#TODO: exception in corrcoef (scalar case)
#one-sample tests
# print d1.ttest_mean(3)
# print stats.ttest_1samp(x1, 3)
# print d1w.ttest_mean(3)
# print stats.ttest_1samp(x1r, 3)
assert_almost_equal(d1.ttest_mean(3)[:2], stats.ttest_1samp(x1, 3), 11)
assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11)
def test_weightstats_3(self):
x1_2d, x2_2d = self.x1_2d, self.x2_2d
w1, w2 = self.w1, self.w2
d1w_2d = DescrStatsW(x1_2d, weights=w1)
d2w_2d = DescrStatsW(x2_2d, weights=w2)
x1r_2d = d1w_2d.asrepeats()
x2r_2d = d2w_2d.asrepeats()
assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14)
assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14)
assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14)
assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14)
assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14)
# print d1w_2d.ttest_mean(3)
# #scipy.stats.ttest is also vectorized
# print stats.ttest_1samp(x1r_2d, 3)
t,p,d = d1w_2d.ttest_mean(3)
assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11)
#print [stats.ttest_1samp(xi, 3) for xi in x1r_2d.T]
cm = CompareMeans(d1w_2d, d2w_2d)
ressm = cm.ttest_ind()
resss = stats.ttest_ind(x1r_2d, x2r_2d)
assert_almost_equal(ressm[:2], resss, 14)
## #doesn't work for 2d, levene doesn't use weights
## cm = CompareMeans(d1w_2d, d2w_2d)
## ressm = cm.test_equal_var()
## resss = stats.levene(x1r_2d, x2r_2d)
## assert_almost_equal(ressm[:2], resss, 14)
def test_weightstats_ddof_tests(self):
# explicit test that ttest and confint are independent of ddof
# one sample case
x1_2d = self.x1_2d
w1 = self.w1
d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0)
d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1)
d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2)
#check confint independent of user ddof
res0 = d1w_d0.ttest_mean()
res1 = d1w_d1.ttest_mean()
res2 = d1w_d2.ttest_mean()
# concatenate into one array with np.r_
assert_almost_equal(np.r_[res1], np.r_[res0], 14)
assert_almost_equal(np.r_[res2], np.r_[res0], 14)
res0 = d1w_d0.ttest_mean(0.5)
res1 = d1w_d1.ttest_mean(0.5)
res2 = d1w_d2.ttest_mean(0.5)
assert_almost_equal(np.r_[res1], np.r_[res0], 14)
assert_almost_equal(np.r_[res2], np.r_[res0], 14)
#check confint independent of user ddof
res0 = d1w_d0.tconfint_mean()
res1 = d1w_d1.tconfint_mean()
res2 = d1w_d2.tconfint_mean()
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
class CheckWeightstats1dMixin(object):
def test_basic(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(x1r.mean(0), d1w.mean, 14)
assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14)
assert_almost_equal(x1r.std(0, ddof=d1w.ddof), d1w.std, 14)
var1 = d1w.var_ddof(ddof=1)
assert_almost_equal(x1r.var(0, ddof=1), var1, 14)
std1 = d1w.std_ddof(ddof=1)
assert_almost_equal(x1r.std(0, ddof=1), std1, 14)
assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14)
#
#assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14)
def test_ttest(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(d1w.ttest_mean(3)[:2],
stats.ttest_1samp(x1r, 3), 11)
# def
# assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
# stats.ttest_ind(x1r, x2r), 14)
def test_ttest_2sample(self):
x1, x2 = self.x1, self.x2
x1r, x2r = self.x1r, self.x2r
w1, w2 = self.w1, self.w2
#Note: stats.ttest_ind handles 2d/nd arguments
res_sp = stats.ttest_ind(x1r, x2r)
assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
res_sp, 14)
#check correct ttest independent of user ddof
cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=1))
assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)
cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
DescrStatsW(x2, weights=w2, ddof=2))
assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)
cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=0))
cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=1))
cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
DescrStatsW(x2, weights=w2, ddof=2))
res0 = cm0.ttest_ind(usevar='unequal')
res1 = cm1.ttest_ind(usevar='unequal')
res2 = cm2.ttest_ind(usevar='unequal')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
#check confint independent of user ddof
res0 = cm0.tconfint_diff(usevar='pooled')
res1 = cm1.tconfint_diff(usevar='pooled')
res2 = cm2.tconfint_diff(usevar='pooled')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
res0 = cm0.tconfint_diff(usevar='unequal')
res1 = cm1.tconfint_diff(usevar='unequal')
res2 = cm2.tconfint_diff(usevar='unequal')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
def test_confint_mean(self):
#compare confint_mean with ttest
d1w = self.d1w
alpha = 0.05
low, upp = d1w.tconfint_mean()
t, p, d = d1w.ttest_mean(low)
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
t, p, d = d1w.ttest_mean(upp)
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
t, p, d = d1w.ttest_mean(np.vstack((low, upp)))
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
class CheckWeightstats2dMixin(CheckWeightstats1dMixin):
def test_corr(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14)
class TestWeightstats1d_ddof(CheckWeightstats1dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 1)
x2 = m2 + np.random.randn(n2, 1)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1, ddof=1)
self.d2w = DescrStatsW(x2, weights=w2, ddof=1)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
class TestWeightstats2d(CheckWeightstats2dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 3)
x2 = m2 + np.random.randn(n2, 3)
w1_ = 2. * np.ones(n1)
w2_ = 2. * np.ones(n2)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1)
self.d2w = DescrStatsW(x2, weights=w2)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
class TestWeightstats2d_ddof(CheckWeightstats2dMixin):

    @classmethod
    def setup_class(cls):
        np.random.seed(9876789)
        n1, n2 = 20, 20
        m1, m2 = 1, 1.2
        x1 = m1 + np.random.randn(n1, 3)
        x2 = m2 + np.random.randn(n2, 3)
        w1 = np.random.randint(1, 4, n1)
        w2 = np.random.randint(1, 4, n2)

        cls.x1, cls.x2 = x1, x2
        cls.w1, cls.w2 = w1, w2
        cls.d1w = DescrStatsW(x1, weights=w1, ddof=1)
        cls.d2w = DescrStatsW(x2, weights=w2, ddof=1)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
class TestWeightstats2d_nobs(CheckWeightstats2dMixin):

    @classmethod
    def setup_class(cls):
        np.random.seed(9876789)
        n1, n2 = 20, 30
        m1, m2 = 1, 1.2
        x1 = m1 + np.random.randn(n1, 3)
        x2 = m2 + np.random.randn(n2, 3)
        w1 = np.random.randint(1, 4, n1)
        w2 = np.random.randint(1, 4, n2)

        cls.x1, cls.x2 = x1, x2
        cls.w1, cls.w2 = w1, w2
        cls.d1w = DescrStatsW(x1, weights=w1, ddof=0)
        cls.d2w = DescrStatsW(x2, weights=w2, ddof=1)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
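
# Background for the test classes above (a minimal sketch, not executed as part
# of the suite): with integer case weights, DescrStatsW.asrepeats() expands the
# data by repeating each observation according to its weight, so the weighted
# statistics can be checked against plain numpy/scipy results on the expanded
# sample:
#
#     x = np.array([1., 2., 3.])
#     d = DescrStatsW(x, weights=np.array([2, 1, 3]))
#     xr = d.asrepeats()            # array([1., 1., 2., 3., 3., 3.])
#     np.testing.assert_allclose(d.mean, xr.mean())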
def test_ttest_ind_with_uneq_var():
    # test cases adapted from scipy; reference values checked against R
    a = (1, 2, 3)
    b = (1.1, 2.9, 4.2)
    pr = 0.53619490753126731
    tr = -0.68649512735572582

    t, p, df = ttest_ind(a, b, usevar='unequal')
    assert_almost_equal([t, p], [tr, pr], 13)

    a = (1, 2, 3, 4)
    pr = 0.84354139131608286
    tr = -0.2108663315950719

    t, p, df = ttest_ind(a, b, usevar='unequal')
    assert_almost_equal([t, p], [tr, pr], 13)
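
# For reference (assumed behaviour, not asserted here): usevar='unequal'
# selects the unequal-variance (Welch) t-test, and ttest_ind then also returns
# the Satterthwaite degrees of freedom as the third element:
#
#     t, p, df = ttest_ind((1, 2, 3), (1.1, 2.9, 4.2), usevar='unequal')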
def test_ztest_ztost():
    # compare weightstats with separately tested proportion ztest ztost
    import statsmodels.stats.proportion as smprop

    x1 = [0, 1]
    w1 = [5, 15]

    res2 = smprop.proportions_ztest(15, 20., value=0.5)
    d1 = DescrStatsW(x1, w1)
    res1 = d1.ztest_mean(0.5)
    assert_allclose(res1, res2, rtol=0.03, atol=0.003)

    d2 = DescrStatsW(x1, np.array(w1) * 21. / 20)
    res1 = d2.ztest_mean(0.5)
    assert_almost_equal(res1, res2, decimal=12)

    res1 = d2.ztost_mean(0.4, 0.6)
    res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6)
    assert_almost_equal(res1[0], res2[0], decimal=12)

    x2 = [0, 1]
    w2 = [10, 10]
    # d2 = DescrStatsW(x1, np.array(w1) * 21. / 20)
    d2 = DescrStatsW(x2, w2)
    res1 = ztest(d1.asrepeats(), d2.asrepeats())
    res2 = smprop.proportions_chisquare(np.asarray([15, 10]),
                                        np.asarray([20., 20]))
    # TODO: is this difference expected? see test_proportion
    assert_allclose(res1[1], res2[1], rtol=0.03)

    res1a = CompareMeans(d1, d2).ztest_ind()
    assert_allclose(res1a[1], res2[1], rtol=0.03)
    assert_almost_equal(res1a, res1, decimal=12)
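
# (The comparisons above use the fact that the weighted mean of 0/1 data with
#  count weights is the sample proportion, so ztest_mean should roughly agree
#  with proportions_ztest; the loose rtol/atol presumably reflect the slightly
#  different variance estimates, see also the TODO above.)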
###### test for ztest and z confidence interval against R BSDA z.test
# Note: I needed to calculate the pooled standard deviation for R
# std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2)
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667)
#> cat_items(zt, "ztest.")
ztest_ = Holder()
ztest_.statistic = 6.55109865675183
ztest_.p_value = 5.711530850508982e-11
ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_.estimate = np.array([7.01818181818182, 5.2625])
ztest_.null_value = 0
ztest_.alternative = 'two.sided'
ztest_.method = 'Two-sample z-Test'
ztest_.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="less")
#> cat_items(zt, "ztest_smaller.")
ztest_smaller = Holder()
ztest_smaller.statistic = 6.55109865675183
ztest_smaller.p_value = 0.999999999971442
ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller.null_value = 0
ztest_smaller.alternative = 'less'
ztest_smaller.method = 'Two-sample z-Test'
ztest_smaller.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="greater")
#> cat_items(zt, "ztest_larger.")
ztest_larger = Holder()
ztest_larger.statistic = 6.55109865675183
ztest_larger.p_value = 2.855760072861813e-11
ztest_larger.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger.estimate = np.array([7.01818181818182, 5.2625])
ztest_larger.null_value = 0
ztest_larger.alternative = 'greater'
ztest_larger.method = 'Two-sample z-Test'
ztest_larger.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="two.sided")
#> cat_items(zt, "ztest_mu.")
ztest_mu = Holder()
ztest_mu.statistic = 2.81972854805176
ztest_mu.p_value = 0.00480642898427981
ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_mu.null_value = 1
ztest_mu.alternative = 'two.sided'
ztest_mu.method = 'Two-sample z-Test'
ztest_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="greater")
#> cat_items(zt, "ztest_larger_mu.")
ztest_larger_mu = Holder()
ztest_larger_mu.statistic = 2.81972854805176
ztest_larger_mu.p_value = 0.002403214492139871
ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_larger_mu.null_value = 1
ztest_larger_mu.alternative = 'greater'
ztest_larger_mu.method = 'Two-sample z-Test'
ztest_larger_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=2, alternative="less")
#> cat_items(zt, "ztest_smaller_mu.")
ztest_smaller_mu = Holder()
ztest_smaller_mu.statistic = -0.911641560648313
ztest_smaller_mu.p_value = 0.1809787183191324
ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller_mu.null_value = 2
ztest_smaller_mu.alternative = 'less'
ztest_smaller_mu.method = 'Two-sample z-Test'
ztest_smaller_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="two.sided")
#> cat_items(zt, "ztest_mu_1s.")
ztest_mu_1s = Holder()
ztest_mu_1s.statistic = 4.415212090914452
ztest_mu_1s.p_value = 1.009110038015147e-05
ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245])
ztest_mu_1s.estimate = 7.01818181818182
ztest_mu_1s.null_value = 6.4
ztest_mu_1s.alternative = 'two.sided'
ztest_mu_1s.method = 'One-sample z-Test'
ztest_mu_1s.data_name = 'x'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less")
#> cat_items(zt, "ztest_smaller_mu_1s.")
ztest_smaller_mu_1s = Holder()
ztest_smaller_mu_1s.statistic = -2.727042762035397
ztest_smaller_mu_1s.p_value = 0.00319523783881176
ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716])
ztest_smaller_mu_1s.estimate = 7.01818181818182
ztest_smaller_mu_1s.null_value = 7.4
ztest_smaller_mu_1s.alternative = 'less'
ztest_smaller_mu_1s.method = 'One-sample z-Test'
ztest_smaller_mu_1s.data_name = 'x'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater")
#> cat_items(zt, "ztest_greater_mu_1s.")
ztest_larger_mu_1s = Holder()
ztest_larger_mu_1s.statistic = 4.415212090914452
ztest_larger_mu_1s.p_value = 5.045550190097003e-06
ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan])
ztest_larger_mu_1s.estimate = 7.01818181818182
ztest_larger_mu_1s.null_value = 6.4
ztest_larger_mu_1s.alternative = 'greater'
ztest_larger_mu_1s.method = 'One-sample z-Test'
ztest_larger_mu_1s.data_name = 'x'
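
# map R's "alternative" labels (as stored in the fixtures above) to the names
# used by statsmodels' ztest/zconfint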
alternatives = {'less': 'smaller',
                'greater': 'larger',
                'two.sided': 'two-sided'}
class TestZTest(object):
    # all examples use the same data
    # no weights used in tests

    @classmethod
    def setup_class(cls):
        cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8])
        cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5])

        cls.d1 = DescrStatsW(cls.x1)
        cls.d2 = DescrStatsW(cls.x2)
        cls.cm = CompareMeans(cls.d1, cls.d2)
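
    # Note: x1 and x2 are the 'x' and 'y' samples whose R z.test output is
    # recorded in the Holder fixtures above (their sample means are the
    # 'estimate' values 7.01818... and 5.2625).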
    def test(self):
        x1, x2 = self.x1, self.x2
        cm = self.cm

        # tc: test cases, i.e. the reference results from R stored in the
        # Holder fixtures above
        for tc in [ztest_, ztest_smaller, ztest_larger,
                   ztest_mu, ztest_smaller_mu, ztest_larger_mu]:

            zstat, pval = ztest(x1, x2, value=tc.null_value,
                                alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            zstat, pval = cm.ztest_ind(value=tc.null_value,
                                       alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            # replace NaN bounds in R's one-sided confint with -inf/inf
            tc_conf_int = tc.conf_int.copy()
            if np.isnan(tc_conf_int[0]):
                tc_conf_int[0] = -np.inf
            if np.isnan(tc_conf_int[1]):
                tc_conf_int[1] = np.inf

            # Note: `value` shifts the confidence interval returned by zconfint
            ci = zconfint(x1, x2, value=0,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)
            ci = cm.zconfint_diff(alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)

            ci = zconfint(x1, x2, value=tc.null_value,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10)

        # one-sample versions of the same checks
        d1 = self.d1
        for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]:
            zstat, pval = ztest(x1, value=tc.null_value,
                                alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            zstat, pval = d1.ztest_mean(value=tc.null_value,
                                        alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            # replace NaN bounds in R's one-sided confint with -inf/inf
            tc_conf_int = tc.conf_int.copy()
            if np.isnan(tc_conf_int[0]):
                tc_conf_int[0] = -np.inf
            if np.isnan(tc_conf_int[1]):
                tc_conf_int[1] = np.inf

            # Note: `value` shifts the confidence interval returned by zconfint
            ci = zconfint(x1, value=0,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)
            ci = d1.zconfint_mean(alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)
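
# Minimal usage sketch (assumed API, mirroring the calls exercised above):
#
#     zstat, pval = ztest(x1, x2, value=0, alternative='two-sided')
#     low, upp = zconfint(x1, x2, alternative='two-sided')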
| bsd-3-clause |