repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
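Each row below pairs a source file's metadata (repo_name, path, copies, size, license) with its full content. As a minimal, hedged sketch of how such a dump could be inspected once loaded into pandas (the file name, storage format, and read call are assumptions, not part of the dump):

import pandas as pd

# Hypothetical load step: adjust the path/format to however this dump is actually stored.
df = pd.read_parquet("code_dump.parquet")  # columns: repo_name, path, copies, size, content, license

# Example queries: files per license class, and average content length per license class.
print(df["license"].value_counts())
print(df.groupby("license")["content"].apply(lambda s: s.str.len().mean()))
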
mrtommyb/mdwarf-rotation | code/mcmcrotation.py | 1 | 4825 | import numpy as np
# import matplotlib.pyplot as plt
import george
from george import kernels
import pandas as pd
from astropy.stats import median_absolute_deviation as MAD
import glob
import scipy.optimize as op
import emcee
import tqdm
import h5py
def read_file(fn):
df = pd.read_csv(fn, skiprows=39)
df['time'] = df.t - df.t[0]
df['flux'] = df.fdt_t_roll_2D / np.median(df.fdt_t_roll_2D) - 1.0
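    # constant per-point flux uncertainty: median absolute deviation of the flux, scaled by an ad hoc factor of 1/20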
df['ferr'] = np.ones_like(df.t) * MAD(df.flux) / 20.
return df
def angus_kernel(theta):
"""
use the kernel that Ruth Angus uses. Be sure to cite her
"""
theta = np.exp(theta)
A = theta[0]
l = theta[1]
G = theta[2]
sigma = theta[4]
P = theta[3]
kernel = (A * kernels.ExpSquaredKernel(l) *
kernels.ExpSine2Kernel(G, P) +
kernels.WhiteKernel(sigma)
)
return kernel
def nll(p, args):
yval, gp = args
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(yval, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p, args):
yval, gp = args
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(yval, quiet=True)
def get_opt_params(theta, t, y, yerr):
kernel = angus_kernel(theta)
gp = george.GP(kernel, ) # solver=george.HODLRSolver)
gp.compute(t, yerr=yerr)
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll,
args=[y, gp])
gp.kernel[:] = results.x
return gp
def get_giant_outliers(y):
giant_outliers = np.abs(y) > (5 * MAD(y))
return giant_outliers
def get_flares(t, y, gp, sig=2.5):
# for some reason trying to predict at exactly the same time is a problem
mup, covp = gp.predict(y, t + (t * 0.0001))
stdp = np.sqrt(np.diag(covp))
    flares = y > mup + (stdp * sig)
return flares
def lnprob(p, time, y, yerr):
# Trivial improper prior: uniform in the log.
# from DFM george user guide
    if np.any((p < -20) + (p > 10)):
return -np.inf
lnprior = 0.0
kernel = angus_kernel(p)
gp = george.GP(kernel)
gp.compute(time, yerr)
return lnprior + gp.lnlikelihood(y, quiet=True)
if __name__ == '__main__':
fns = glob.glob('../data/*.csv')
fn = fns[4]
df = read_file(fn)
cutdays = True
if cutdays:
timemask = df.time < 15.
else:
timemask = np.ones_like(df.time, dtype='bool')
    theta_guess = [-10, 2.5, 5, 1.3, -13]
omask = get_giant_outliers(df.flux)
mask = ~omask & timemask
gp = get_opt_params(theta_guess,
df.loc[mask, 'time'],
df.loc[mask, 'flux'],
df.loc[mask, 'ferr'])
flares = get_flares(df.time[mask],
df.flux[mask],
gp,
sig=2.5,
)
notflares = ~flares
# recalculate GP
mask = ~omask & notflares & timemask
gp = get_opt_params(gp.kernel[:],
df.loc[mask, 'time'],
df.loc[mask, 'flux'],
df.loc[mask, 'ferr'])
nwalkers, ndim = 16, len(gp.kernel[:])
outfile = 'chain.hdf5'
steps = 30
with h5py.File(outfile, u"w") as f:
c_ds = f.create_dataset("chain",
(nwalkers, steps, ndim),
dtype=np.float64)
lp_ds = f.create_dataset("lnprob",
(nwalkers, steps),
dtype=np.float64)
print('starting mcmc with params {}'.format(gp.kernel[:]))
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=[
df.loc[mask, 'time'],
df.loc[mask, 'flux'],
df.loc[mask, 'ferr'],
])
p0 = (np.repeat(gp.kernel.vector, nwalkers) +
(np.random.random(size=ndim * nwalkers) * 1.e-4))
p0 = p0.reshape(ndim, nwalkers).T
print("Running burn-in")
p0, _, _ = sampler.run_mcmc(p0, 2)
print("Running production chain")
    # Avoid shadowing the lnprob() function defined above: name the per-walker
    # log-probabilities lnprob_vals, and append each step straight to the HDF5 file.
    for ind, (pos, lnprob_vals, state) in tqdm.tqdm(enumerate(sampler.sample(
            p0, iterations=steps, storechain=False))):
        with h5py.File(outfile, u"a") as f:
            c_ds = f["chain"]
            lp_ds = f["lnprob"]
            c_ds[:, ind, :] = pos
            lp_ds[:, ind] = lnprob_vals
print("Mean acceptance fraction: {0:.3f}"
.format(np.mean(sampler.acceptance_fraction)))
| mit |
moschlar/SAUCE | setup.py | 1 | 4129 | # -*- coding: utf-8 -*-
#
## SAUCE - System for AUtomated Code Evaluation
## Copyright (C) 2013 Moritz Schlarb
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Quickstarted Options:
#
# sqlalchemy: True
# auth: sqlalchemy
# mako: True
#
#
# This is just a work-around for a Python 2.7 issue causing an
# interpreter crash at exit when trying to log an info message.
try:
import logging # @UnusedImport pylint:disable=unused-import
import multiprocessing # @UnusedImport pylint:disable=unused-import
except:
pass
import os, sys
here = os.path.dirname(os.path.abspath(__file__))
from setuptools import setup, find_packages
assert sys.version_info[:2] in ((2, 6), (2, 7))
install_requires = [
'TurboGears2 >= 2.3.8',
'gearbox',
'Babel',
'Mako',
'zope.sqlalchemy >= 0.4',
'repoze.tm2 >= 1.0a5',
'sqlalchemy >= 0.8.2',
'alembic',
'repoze.who < 2.0',
'repoze.who.plugins.sa',
'repoze.who-testutil',
'repoze.who-friendlyform >= 1.0.4',
'repoze.what >= 1.0.8',
'repoze.what.plugins.sql >= 1.0.1',
'repoze.what-pylons >= 1.0',
'repoze.what-quickstart',
'tw2.core >= 2.2.1.1',
'tw2.forms >= 2.1.4.2',
'tw2.sqla',
'tw2.dynforms',
'tw2.jquery',
'tw2.bootstrap.forms >= 2.2.2.1',
'tw2.wysihtml5 >= 0.3.1',
'tw2.jqplugins.chosen >= 0.3',
'tw2.codemirror >= 0.2.1',
'tw2.pygmentize >= 0.2.1',
'tgext.admin >= 0.6.1, < 0.7',
'tgext.crud >= 0.7, < 0.8',
'sprox >= 0.9', # Dynamic form widget generation
'docutils', # For rendering documentation
'chardet', # For submission file charset detection
'bootalchemy >= 0.4.1',
'repoze.sendmail',
'bleach',
'WebHelpers',
'python-status',
]
if sys.version_info[:2] != (2, 7):
install_requires += ['ordereddict']
tests_require = [
'tg.devtools >= 2.3.8',
'WebTest >= 1.2.3, < 2.0',
'nose',
'nose-exclude',
'coverage',
'wsgiref',
'repoze.who-testutil >= 1.0.1',
'BeautifulSoup',
'sieve', # tw2.core.testbase
# 'tw2.core[tests]',
]
if sys.version_info[:2] != (2, 7):
tests_require += ['unittest2']
extras_require = {
'similarity': [
'numpy',
'matplotlib',
'libripoff >= 0.2',
],
'test': tests_require,
'tests': tests_require,
'nose': tests_require,
'nosetests': tests_require,
'sentry': ['raven'],
'shell': ['ipython'],
'lti': [
'BeautifulSoup',
'oauth2',
],
}
setup(
name='SAUCE',
version='1.8.0',
description='System for AUtomated Code Evaluation',
long_description=open(os.path.join(here, 'README.rst')).read(),
author='Moritz Schlarb',
author_email='[email protected]',
url='https://github.com/moschlar/SAUCE',
license='AGPL-3.0',
packages=find_packages(),
install_requires=install_requires,
include_package_data=True,
extras_require=extras_require,
test_suite='nose.collector',
tests_require=tests_require,
package_data={'sauce': [
'i18n/*/LC_MESSAGES/*.mo',
'templates/*/*',
'public/*/*'
]},
message_extractors={'sauce': [
('**.py', 'python', None),
('templates/**.mako', 'mako', None),
('public/**', 'ignore', None)
]},
entry_points={
'paste.app_factory': [
'main = sauce.config.middleware:make_app'
],
'gearbox.plugins': [
'turbogears-devtools = tg.devtools'
],
},
zip_safe=False,
)
| agpl-3.0 |
cbertinato/pandas | pandas/tests/util/test_validate_kwargs.py | 1 | 2016 | from collections import OrderedDict
import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = OrderedDict()
compat_args[good_arg] = "foo"
compat_args[bad_arg + "o"] = "bar"
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg))
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=_fname))
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = "s"
compat_args["baz"] = None
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["f"] = None
compat_args["b"] = 1
compat_args["ba"] = "s"
kwargs = dict(f=None, b=1)
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = ("For argument \"%s\" expected type bool, received type %s" %
(name, type(value).__name__))
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
| bsd-3-clause |
idbedead/RNA-sequence-tools | FPKM_Parsing/fpkm_df_threshold_renamecells3.py | 2 | 9111 | import os
import cPickle as pickle
import numpy as np
import pandas as pd
from collections import OrderedDict
def delete_cells(by_cell, cell_list, del_list):
to_delete1 =[]
for pos, cell_name in enumerate(cell_list):
if cell_name in del_list:
to_delete1.append(pos)
to_delete = sorted(to_delete1, reverse=True)
for pos in to_delete:
print 'Deleted specific cell '+cell_list[pos]
del cell_list[pos]
n_by_cell = np.delete(by_cell, to_delete, axis=0)
return cell_list, n_by_cell
def filter_by_mapping(path_to_align, cutoff_per_map = 100000, name_filter=False):
c_to_del =[]
if path_to_align[-2:] == '.p':
with open(path_to_align, 'rb') as fp:
a_data = pickle.load(fp)
elif path_to_align[-4:] == '.txt':
a_data = pd.DataFrame.from_csv(path_to_align, sep='\t')
p_mapped = a_data['mapped_L_num']
ind_list = p_mapped[p_mapped<cutoff_per_map]
c_to_del = ind_list.index.values
if name_filter:
new_c_to_del = []
for c in c_to_del:
c2 = c.replace('-','_')
nlist = c2.split('_')
print nlist, 'nlist'
if nlist[0] == 'pdgfra':
if nlist[2] == 'ctrl':
new_c_to_del.append('Low_ctrl_'+nlist[3])
elif nlist[2] == 'd4pnx':
new_c_to_del.append('Low_pnx_'+nlist[3])
else:
if len(nlist[0]) == 2:
new_c = 'C0'+nlist[0][-1]
else:
new_c = nlist[0]
if nlist[2] == 'ctrl1':
new_c_to_del.append('ctrl1_'+new_c)
elif nlist[2] == 'pnxd4':
new_c_to_del.append('pnx1_'+new_c)
return new_c_to_del
else:
return c_to_del
def filter_cells_sd(by_cell, cell_list, sd=3.8):
average_gene_exp = []
to_delete= []
for cell_name, genes in zip(cell_list,by_cell):
gen_exp = (genes >= 1).sum()
        # if the cell doesn't express at least 1000 genes, delete it and exclude it from the average
if gen_exp <=1000:
to_delete.append(cell_list.index(cell_name))
else:
average_gene_exp.append(gen_exp)
np_av = np.array(average_gene_exp)
averg = np.average(np_av)
gene_sd = np.std(np_av)
print averg, gene_sd
#add cells that fall outside of stdev value sd
for i1, exp_level in enumerate(np_av):
if exp_level < (averg - (gene_sd*sd)) or exp_level > (averg + (gene_sd*sd)):
if i1 not in to_delete:
to_delete.append(i1)
to_delete1 = sorted(to_delete, reverse = True)
print to_delete1
for pos in to_delete1:
print 'Deleted outlier '+cell_list[pos]
del cell_list[pos]
n_by_cell = np.delete(by_cell, to_delete1, axis=0)
print "Number of cells remaining: "+str(len(cell_list))
naverage_gene_exp = []
for ngenes in n_by_cell:
ngen_exp = (ngenes >= 1).sum()
naverage_gene_exp.append(ngen_exp)
nnp_av = np.array(naverage_gene_exp)
naverg = np.average(nnp_av)
ngene_sd = np.std(nnp_av)
print "New", naverg, ngene_sd
return cell_list, n_by_cell
def threshold_genes(by_gene, gen_list, number_expressed=3):
g_todelete = []
for g1, gene in enumerate(by_gene):
cells_exp = (gene >= 1.0).sum()
if cells_exp < number_expressed:
g_todelete.append(g1)
g1_todelete = sorted(g_todelete, reverse = True)
print by_gene.shape
for pos in g1_todelete:
if type(gen_list[pos]) != float:
#print 'Gene '+gen_list[pos]+' not expressed in '+str(number_expressed)+' cells.'
pass
del gen_list[pos]
n_by_gene = np.delete(by_gene, g1_todelete, axis=0)
print n_by_gene.shape
return gen_list, n_by_gene
# Given a pandas dataframe of gene expression, split out the ERCC spike-ins and return separate dataframes for each
def sep_ERCC(pd_by_gene, gen_list):
w_gene_list = list(gen_list)
ERCC_list= []
pop_list =[]
to_del = []
for i, gen in enumerate(w_gene_list):
if 'ERCC-00' == gen[0:7] or 'RNASPIKE1-EC02' in gen:
pop_list.append(i)
to_del = sorted(to_del, reverse=True)
for d in to_del:
del w_gene_list[d]
pop_list = sorted(pop_list, reverse=True)
for pos in pop_list:
ERCC_list.append(w_gene_list.pop(pos))
pd_by_gene_no_ERCC = pd_by_gene[w_gene_list]
pd_ERCC = pd_by_gene[ERCC_list]
return pd_by_gene_no_ERCC.transpose(), pd_ERCC.transpose(), w_gene_list
path_to_file = '/Volumes/Seq_data/cuffnorm_sca_spc_combined'
file_name = 'genes.fpkm_table'
base_name ='scp_sca_combined'
data = pd.DataFrame.from_csv(os.path.join(path_to_file,file_name), sep='\t')
gen_list = data.index.tolist()
cell_list = [x.strip('_0') for x in list(data.columns.values)]
path_to_align=os.path.join(path_to_file,'results_sca_spc_combined_align.p')
del_list=filter_by_mapping(path_to_align, name_filter=False)
print del_list, 'del'
npdata = np.array(data.values, dtype='f')
by_cell1 = npdata.transpose()
rem_cell_list, rem_by_cell = delete_cells(by_cell1, cell_list, del_list)
npdata2 = rem_by_cell.transpose()
new_gene_list1, new_by_gene = threshold_genes(npdata2, gen_list)
by_cell = new_by_gene.transpose()
outlier_cell_list, outlier_by_cell = filter_cells_sd(by_cell, rem_cell_list)
final_by_gene = outlier_by_cell.transpose()
outlier_fpkm_dict = OrderedDict()
bulk_ctrl_dict = OrderedDict()
filter_on_lane = False
bulk = False
if filter_on_lane:
for i, l in enumerate(outlier_by_cell):
split_cell_list = outlier_cell_list[i].split('_')
cell_name = outlier_cell_list[i]
if 'neg' in cell_name or '+' in cell_name or '-' in cell_name:
print cell_name, '1'
if 'Ra' not in split_cell_list and 'pdgfra' not in split_cell_list:
print cell_name, '2'
bulk_ctrl_dict['_'.join(split_cell_list[1:])] = [float(lix) for lix in l]
elif split_cell_list[0] == 'Lane1' and 'C' in split_cell_list[1] or split_cell_list[0] == 'Lane2' and 'C' in split_cell_list[1]:
if 'Ra' not in split_cell_list and 'pdgfra' not in split_cell_list:
if len(split_cell_list[1]) == 3:
cell_name = 'CTRL_'+split_cell_list[1]
elif len(split_cell_list[1]) == 2:
cell_name = 'CTRL_'+split_cell_list[1][0]+'0'+split_cell_list[1][1]
else:
print split_cell_list[1], 'wtf'
outlier_fpkm_dict[cell_name] = [float(lx) for lx in l]
elif split_cell_list[0] == 'Lane3' and 'C' in split_cell_list[1] or split_cell_list[0] == 'Lane4'and 'C' in split_cell_list[1]:
if 'Ra' not in split_cell_list and 'pdgfra' not in split_cell_list:
if len(split_cell_list[1]) == 3:
cell_name = 'PNX_'+split_cell_list[1]
elif len(split_cell_list[1]) == 2:
cell_name = 'PNX_'+split_cell_list[1][0]+'0'+split_cell_list[1][1]
else:
print split_cell_list[1], 'wtf'
outlier_fpkm_dict[cell_name] = [float(lx) for lx in l]
else:
for i, l in enumerate(outlier_by_cell):
if bulk:
cell_name = outlier_cell_list[i]
if 'bulk' in cell_name or '+' in cell_name or 'neg' in cell_name or '-' in cell_name:
bulk_ctrl_dict[cell_name] = [float(lx) for lx in l]
else:
outlier_fpkm_dict[cell_name] = [float(lx) for lx in l]
else:
cell_name = outlier_cell_list[i]
outlier_fpkm_dict[cell_name] = [float(lx) for lx in l]
if bulk:
df_bulk = pd.DataFrame(bulk_ctrl_dict, index = new_gene_list1)
else:
pass
fpkm_df_outlier1 = pd.DataFrame(outlier_fpkm_dict, index = new_gene_list1)
mod_gen_list = list(new_gene_list1)
fpkm_df_outlier, df_ERCC, new_gene_list = sep_ERCC(fpkm_df_outlier1.transpose(), mod_gen_list)
mod_gen_list = list(new_gene_list1)
if bulk:
bulk_ctrl_df, df_bulk_ERCC, bulk_gene_list = sep_ERCC(df_bulk.transpose(), mod_gen_list)
else:
pass
outlier_cell_list = [x for x in list(fpkm_df_outlier.columns.values)]
df_ERCC.to_csv(os.path.join(path_to_file, base_name+'_ERCC.txt'), sep = '\t')
if bulk:
df_bulk_ERCC.to_csv(os.path.join(path_to_file, base_name+'_bulkonly_ERCC.txt'), sep = '\t')
bulk_ctrl_df.to_csv(os.path.join(path_to_file, base_name+'_bulk_ctrls.txt'), sep = '\t')
else:
pass
fpkm_df_outlier.to_csv(os.path.join(path_to_file, base_name+'_outlier_filtered.txt'), sep = '\t')
with open(os.path.join(path_to_file,base_name+'_outlier_by_cell.p'), 'wb') as fp1:
pickle.dump(fpkm_df_outlier.transpose(), fp1)
with open(os.path.join(path_to_file,base_name+'_outlier_cell_list.p'), 'wb') as fp2:
pickle.dump(outlier_cell_list, fp2)
with open(os.path.join(path_to_file,base_name+'_outlier_by_gene.p'), 'wb') as fp3:
pickle.dump(fpkm_df_outlier, fp3)
with open(os.path.join(path_to_file,base_name+'_outlier_gene_list.p'), 'wb') as fp4:
pickle.dump(new_gene_list, fp4)
| mit |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/SkFlow_DEPRECATED/text_classification.py | 5 | 3483 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 10
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 50
def average_model(X, y):
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
features = tf.reduce_max(word_vectors, reduction_indices=1)
return skflow.models.logistic_regression(features, y)
def rnn_model(X, y):
"""Recurrent neural network model to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = skflow.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, word_vectors)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = rnn.rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
return skflow.models.logistic_regression(encoding, y)
classifier = skflow.TensorFlowEstimator(model_fn=rnn_model, n_classes=15,
steps=1000, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continously train for 1000 steps & predict on test set.
while True:
classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_rnn')
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
DSLituiev/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
GoogleCloudPlatform/ai-platform-samples | training/pytorch/structured/custom_containers/gpu/trainer/inputs.py | 1 | 5152 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from google.cloud import storage
import pandas as pd
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torch.utils.data.sampler import SubsetRandomSampler
import metadata
class CSVDataset(Dataset):
def __init__(self, args, csv_files, device, transform=None):
"""
Args:
args: arguments passed to the python script
csv_files (list): Path to the list of csv files with annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.dataframe = None
for csv_file in csv_files:
if self.dataframe is None:
self.dataframe = pd.read_csv(csv_file, header=0)
else:
self.dataframe = pd.concat(
[self.dataframe, pd.read_csv(csv_file, header=0)])
self.device = device
self.transform = transform
# Convert the categorical columns in place to a numerical category
# Example: Payment_Type =
# ['Credit Card' 'Cash' 'No Charge' 'Dispute' 'Unknown']
# Converted: Payment_Type = [0, 1, 2, 3, 4]
if args.embed_categorical_columns:
for category in metadata.CATEGORICAL_COLUMNS:
self.dataframe[category].replace(
{val: i for i, val in enumerate(
self.dataframe[category].unique())},
inplace=True)
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
# When retrieving an item from the dataset, get the features and the
# target. In this template, the target is 0th column and the features
# are all the other columns.
features = self.dataframe.iloc[idx, 1:].values
target = self.dataframe.iloc[idx, :1].values
# Load the data as a tensor
item = {
'features': torch.from_numpy(features).to(self.device),
'target': torch.from_numpy(target).to(self.device)
}
if self.transform:
item = self.transform(item)
return item
def load_data(args, device):
"""Loads the data into three different data loaders. (Train, Test, Evaluation)
Split the training dataset into a train / test dataset.
Args:
args: arguments passed to the python script
"""
train_dataset = CSVDataset(args, args.train_files, device)
eval_dataset = CSVDataset(args, args.eval_files, device)
# Determine the size of the dataset and the train/test sets
dataset_size = len(train_dataset)
test_size = int(args.test_split * dataset_size)
train_size = dataset_size - test_size
# Use random_split to get the split indices for the train/test set
train_dataset, test_dataset = random_split(train_dataset,
[train_size, test_size])
# Use the subset random sampler for the dataloader to know which
# parts of the dataset belong to the train/test set
# Note: use `tolist()` to convert the indices tensor to a list or
# enumerating over the DataLoader will fail.
train_sampler = SubsetRandomSampler(train_dataset.indices)
test_sampler = SubsetRandomSampler(test_dataset.indices)
# Create the data loaders with the train/test sets.
train_loader = DataLoader(
train_dataset.dataset,
batch_size=args.batch_size,
sampler=train_sampler)
test_loader = DataLoader(
test_dataset.dataset,
batch_size=args.batch_size,
sampler=test_sampler)
# Create data loader with the eval set
eval_loader = DataLoader(
eval_dataset,
batch_size=args.batch_size)
return train_loader, test_loader, eval_loader
def save_model(args):
"""Saves the model to Google Cloud Storage
Args:
args: contains name for saved model.
"""
scheme = 'gs://'
bucket_name = args.job_dir[len(scheme):].split('/')[0]
prefix = '{}{}/'.format(scheme, bucket_name)
bucket_path = args.job_dir[len(prefix):].rstrip('/')
datetime_ = datetime.datetime.now().strftime('model_%Y%m%d_%H%M%S')
if bucket_path:
model_path = '{}/{}/{}'.format(bucket_path, datetime_, args.model_name)
else:
model_path = '{}/{}'.format(datetime_, args.model_name)
bucket = storage.Client().bucket(bucket_name)
blob = bucket.blob(model_path)
blob.upload_from_filename(args.model_name)
| apache-2.0 |
kkreis/espressopp | contrib/mpi4py/mpi4py-2.0.0/demo/mandelbrot/mandelbrot-master.py | 11 | 1466 | from mpi4py import MPI
import numpy as np
x1 = -2.0
x2 = 1.0
y1 = -1.0
y2 = 1.0
w = 600
h = 400
maxit = 255
import os
dirname = os.path.abspath(os.path.dirname(__file__))
executable = os.path.join(dirname, 'mandelbrot-worker.exe')
# spawn worker
worker = MPI.COMM_SELF.Spawn(executable, maxprocs=7)
size = worker.Get_remote_size()
# send parameters
rmsg = np.array([x1, x2, y1, y2], dtype='f')
imsg = np.array([w, h, maxit], dtype='i')
worker.Bcast([rmsg, MPI.REAL], root=MPI.ROOT)
worker.Bcast([imsg, MPI.INTEGER], root=MPI.ROOT)
# gather results
counts = np.empty(size, dtype='i')
indices = np.empty(h, dtype='i')
cdata = np.empty([h, w], dtype='i')
worker.Gather(sendbuf=None,
recvbuf=[counts, MPI.INTEGER],
root=MPI.ROOT)
worker.Gatherv(sendbuf=None,
recvbuf=[indices, (counts, None), MPI.INTEGER],
root=MPI.ROOT)
worker.Gatherv(sendbuf=None,
recvbuf=[cdata, (counts * w, None), MPI.INTEGER],
root=MPI.ROOT)
# disconnect worker
worker.Disconnect()
# reconstruct full result
M = np.zeros([h, w], dtype='i')
M[indices, :] = cdata
# eye candy (requires matplotlib)
try:
from matplotlib import pyplot as plt
plt.imshow(M, aspect='equal')
plt.spectral()
try:
import signal
def action(*args): raise SystemExit
signal.signal(signal.SIGALRM, action)
signal.alarm(2)
except:
pass
plt.show()
except:
pass
| gpl-3.0 |
okadate/romspy | romspy/make/make_frc_file.py | 1 | 3270 | # coding: utf-8
"""
Program to make frcing nc files from observations at osaka
2014-10-21 okada make this file.
"""
import netCDF4
from numpy import dtype
import pandas as pd
import datetime
import numpy as np
def _parse(date, hour):
"""
parser for time in csvfile
"""
dt = datetime.datetime.strptime(date, '%Y/%m/%d')
delta = datetime.timedelta(hours=float(hour))
return dt + delta
def read_osaka(csvfile):
"""
csvfile is from dbfile.
"""
global time_out
    data = pd.read_csv(csvfile, parse_dates=[['date', 'hour']], date_parser=_parse,
                       na_values='--')
data = data.interpolate()
data = data.fillna(method='bfill')
data['mjd'] = data['date_hour'].apply(
lambda t: netCDF4.date2num(t, 'days since 1968-05-23 09:00:00 GMT'))
print data.describe()
# set data
nt = len(data)
time_out = data.mjd.values
tair_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
pair_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
qair_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
cloud_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
rain_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
swrad_out = np.ndarray(shape=[nt, eta_rho, xi_rho])
for eta in xrange(eta_rho):
for xi in xrange(xi_rho):
tair_out[:, eta, xi] = data.temperature # C
pair_out[:, eta, xi] = data.air_pressure # 1 millibar = 1 hPa
qair_out[:, eta, xi] = data.humidity # %
cloud_out[:, eta, xi] = data.cloud/10.0 # 0-10
rain_out[:, eta, xi] = data.precipitation/3600.0 # 1 kg/m2/s = 3600 mm/h
swrad_out[:, eta, xi] = data.radiation*10**6/3600 # 1 W/m2 = 3600/(10**6) MJ/m2/h
return tair_out, pair_out, qair_out, cloud_out, rain_out, swrad_out
def make_nc(filename, varname, var_out, units):
nc = netCDF4.Dataset(filename.format(varname), 'w', format='NETCDF3_CLASSIC')
now = datetime.datetime.now()
nc.history = now.strftime('%Y-%m-%d %H:%M:%S')
nc.author = 'OKADA Teruhisa'
nc.createDimension('xi_rho', xi_rho)
nc.createDimension('eta_rho', eta_rho)
nc.createDimension('time_hourly', None)
time = nc.createVariable('time_hourly', dtype('double').char, ('time_hourly',) )
var = nc.createVariable( varname, dtype('float32').char, ('time_hourly', 'eta_rho', 'xi_rho') )
time.units = 'days since 1968-05-23 00:00:00 GMT'
var.units = units
time[:] = time_out
var[:,:,:] = var_out
var.time = 'time_hourly'
nc.close()
def make_frc_file():
global xi_rho, eta_rho
xi_rho = 117
eta_rho = 124
csvfile = '/home/work/okada/OB500/Forcing/jma_osaka/weather_24h/osaka_L.csv'
frcfile = 'ob500_frc_{}_2012.nc'
tair_out, pair_out, qair_out, cloud_out, rain_out, swrad_out = read_osaka(csvfile)
make_nc(frcfile, 'Tair', tair_out, 'Celsius')
make_nc(frcfile, 'Pair', pair_out, 'millibar')
make_nc(frcfile, 'Qair', qair_out, 'percentage')
make_nc(frcfile, 'cloud', cloud_out, 'nondimensional')
make_nc(frcfile, 'rain', rain_out, 'kilogram meter-2 second-1')
make_nc(frcfile, 'swrad', swrad_out, 'watt meter-2')
if __name__ == '__main__':
make_frc_file()
| mit |
clickpn/data-science-from-scratch | code/gradient_descent.py | 53 | 5895 | from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
| unlicense |
zaxtax/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 25 | 2252 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
mfjb/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/plot_Richardson_2D_const_BW_fast.py | 1 | 6864 | #!~/python
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
import lagrangian_stats
import csv
import advect_functions
# read offline
print 'reading particles'
exp = 'm_25_2b'
label = 'm_25_2b'
filename2D_BW = './csv/RD_2D_m_25_2b_particles.csv'
tt_BW = 439 # IC + 24-48 included
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = 1.*np.cumsum(dl)
depths = [5, 10, 15]
depthid = [1, 2, 3]
nl = len(depths)
RD_2D_BW = [] #np.zeros((tt_BW,nl))
time2D_BW = []
with open(filename2D_BW, 'r') as csvfile:
spamreader = csv.reader(csvfile)
spamreader.next()
for row in spamreader:
time2D_BW.append(row[0])
RD_2D_BW.append(row[1:])
time2D_BW = np.asarray(time2D_BW).astype(float)
RD_2D_BW = np.asarray(RD_2D_BW).astype(float)
time = time2D_BW[:]
# cut particles to time of interest
timeD = np.asarray(range(0,3*86400,1440))/3600.
vtime = time - time[0]
# read 3D eps and get eps at particle's location
drateD_BW = np.zeros((len(timeD),len(Zlist)))
for t in range(len(timeD)):
# print 'read drate', t
with open('../../2D/U/drate_3+1day/z/drate_m_25_2b_particles_'+str(t+60)+'_z.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
drateD_BW[t,:] = row[:]
# test drate
#plt.contourf(timeD/86400.,Zlist,np.log10(np.rot90(drateD_B)),30)
#plt.colorbar()
#plt.savefig('./plot/'+label+'/drate_B_'+label+'.eps')
#plt.close()
#
#plt.contourf((timeD)/3600.+48,Zlist,np.log10(np.rot90(drateD_BW)),30)
#plt.colorbar()
#plt.savefig('./plot/'+label+'/drate_BW_'+label+'.eps')
#print './plot/'+label+'/drate_BW_'+label+'.eps'
#plt.close()
# normalized RD
fig = plt.figure(figsize=(8, 5))
time2D_BW = time2D_BW - time2D_BW[0]
R2D5_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,0]/time2D_BW[:]**3),'k',linewidth=1)
R2D10_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,1]/time2D_BW[:]**3),'k--',linewidth=1)
R2D15_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,2]/time2D_BW[:]**3),'k-.',linewidth=1)
intm = 0.3*86400; intM = 2.5*86400; interval = (vtime > intm) * (vtime < intM)
R2D5_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,0]/time2D_BW[interval]**3),'k',linewidth=3.5)
intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
R2D10_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,1]/time2D_BW[interval]**3),'k--',linewidth=3.5)
intm = 0.6*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
R2D15_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,2]/time2D_BW[interval]**3),'k-.',linewidth=3.5)
#plt.legend((R2D5_BW,R2D10_BW,R2D15_BW,R2D5_B,R2D10_B,R2D15_B),('$BW25_m$ 5m','$BW25_m$ 10m','$BW25_m$ 15m','$B25_m$ 5m','$B25_m$ 10m','$B25_m$ 15m'), loc=1,fontsize=16,ncol=2)
plt.legend((R2D5_BW,R2D10_BW,R2D15_BW),('5m','10m','15m'), loc=1,fontsize=16,ncol=3)
plt.xlabel('Time $[hr]$',fontsize=20)
plt.ylabel('$log(\sigma^2_D t^{-3})$ ',fontsize=20)
plt.yticks(fontsize=16)
plt.ylim()
#ind = [0.,12.,24.,36.,48.,60.,72.,84.,96.,108.,120.,132.,144.,156.,168.,180.,192.]
ind = np.asarray([0.,12.,24.,48.,96.,192.])
#ind = np.linspace(0,24*8,7)
ind[0] = 1440/3600.
vind = np.log10(ind);# vind[0]=np.log10(1440/3600.)
plt.xticks(vind,['72.4','84','96','144','168','264'],fontsize=16)
plt.tight_layout()
plt.savefig('./plot/'+label+'/RDt3_2_BW_'+label+'.eps')
print './plot/'+label+'/RDt3_2_BW_'+label+'.eps'
plt.close()
# Rich 2D-3D
#fig = plt.figure(figsize=(8, 5))
fig, ax1 = plt.subplots(figsize=(8, 5))
#
# BW
intm = 0.3*86400; intM = 2.5*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,0]/time2D_BW[interval]**3/(drateD_BW[interval,depths[0]])
print 'Rich 2D 5m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 5m: mean', np.mean(drateD_BW[interval,depths[0]]), 'std', np.std(drateD_BW[interval,depths[0]])
R2D5_BW, = ax1.plot(time2D_BW[interval]/3600.,Rich,'r',linewidth=2)
intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,1]/time2D_BW[interval]**3/(drateD_BW[interval,depths[1]])
print 'Rich 2D 10m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 10m: mean', np.mean(drateD_BW[interval,depths[1]]), 'std', np.std(drateD_BW[interval,depths[1]])
R2D10_BW, = ax1.plot(time2D_BW[interval]/3600.,Rich,'r--',linewidth=2)
intm = 0.6*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,2]/time2D_BW[interval]**3/(drateD_BW[interval,depths[2]])
print 'Rich 2D 15m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 15m: mean', np.mean(drateD_BW[interval,depths[2]]), 'std', np.std(drateD_BW[interval,depths[2]])
R2D15_BW, = ax1.plot(time2D_BW[interval]/3600.,Rich,'r-.',linewidth=2)
#for tic in plt.xaxis.get_minor_ticks():
# tic.tick1On = tic.tick2On = False
#plt.legend((R2D1,R3D1,R2D5,R3D5,R2D17,R3D17),('2D 5m','3D 5m','2D 10m','3D 10m','2D 15m','3D 15m'),loc=3,fontsize=16,ncol=3)
#plt.legend((R2D5_BW,R2D10_BW,R2D15_BW,R2D5_B,R2D10_B,R2D15_B),('$BW25_m$ 5m','$BW25_m$ 10m','$BW25_m$ 15m','$B25_m$ 5m','$B25_m$ 10m','$B25_m$ 15m'),loc=2,fontsize=16,ncol=2)
dummy5, = ax1.plot([],[],'k',linewidth=2)
dummy10, = ax1.plot([],[],'k--',linewidth=2)
dummy15, = ax1.plot([],[],'k-.',linewidth=2)
ax1.legend((dummy5,dummy10,dummy15),('5m','10m','15m'),loc=1,fontsize=14,ncol=3)
#import matplotlib.lines as mlines
#l5 = mlines.Line2D([], [],'-',color='black', label='5m')
#l10 = mlines.Line2D([], [],'--',color='black', label='10m')
#l15 = mlines.Line2D([], [],'-.',color='black', label='15m')
#ax1.legend(handles=[l5,l10,l15],loc=1,fontsize=16,ncol=3)
ax1.set_xlabel('Time $[hr]$',fontsize=20)
ax1.set_ylabel('$\sigma^2_D t^{-3} \epsilon^{-1}$ ',fontsize=20,color='r')
ax1.set_ylim(0.02,0.18)
for tl in ax1.get_yticklabels():
tl.set_color('r')
tl.set_fontsize(16)
#plt.ylim(0.02,0.18)
#plt.yticks(fontsize=16)
ind = np.linspace(72,24*3+72,13)
ind[0] = 52
ax2 = ax1.twinx()
DR5, = ax2.plot(timeD,drateD_BW[:,depths[0]],'b',linewidth=2)
DR10, = ax2.plot(timeD,drateD_BW[:,depths[1]],'b--',linewidth=2)
DR15, = ax2.plot(timeD,drateD_BW[:,depths[2]],'b-.',linewidth=2)
#ax2.legend((DR5,DR10,DR15),('5m','10m','15m'),loc=1,fontsize=14,ncol=3)
ax2.set_ylim(0.,1.2e-8)
ax2.set_ylabel('$\epsilon$ ',fontsize=20,color='b')
for tl in ax2.get_yticklabels():
tl.set_color('b')
tl.set_fontsize(16)
plt.xlim(0,72)
#plt.xticks(ind,['','54','60','66','72','78','84','90','96','102','108','114','120'],fontsize=16)
plt.xticks(np.linspace(0,72,13),np.linspace(0,72,13).astype(int),fontsize=16)
plt.tight_layout()
plt.savefig('./plot/'+label+'/Rich_2_BW_'+label+'.eps')
print './plot/'+label+'/Rich_2_BW_'+label+'.eps'
plt.close()
| gpl-2.0 |
sgenoud/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
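        # Signature rendering is effectively disabled: the first return below
        # makes the rest of this method unreachable.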
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
suclearnub/discordgrapher | plot.py | 1 | 7186 | from tqdm import tqdm
import argparse
import datetime
import time
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def plotLong(): #Plotting messages/day vs day
print("OK, now generating a long graph.")
plotLongArray = copy.copy(processedArray) #A bit inefficient but it'll do.
with tqdm(leave=True, unit=' messages', total=lineNumber, desc="Preparing") as counter:
for line in plotLongArray:
line[0] = datetime.date.fromtimestamp(line[0])
counter.update(1)
for row in plotLongArray:
del row[1]
print("Generating graph...")
plotLongDateString2 = [item for sublist in plotLongArray for item in sublist]
plotLongCount = [[x,plotLongDateString2.count(x)] for x in set(plotLongDateString2)]
r = np.asarray(plotLongCount)
r = r[r[:,0].argsort()]
years = mdates.YearLocator()
months = mdates.MonthLocator()
yearsFmt = mdates.DateFormatter('%Y')
fig, ax = plt.subplots()
ax.plot(r[:,0],r[:,1])
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
fig.autofmt_xdate()
plt.xlabel("Date")
plt.ylabel("Messages")
plt.show()
quit()
def plotWeekHour(): #Plotting messges per hour for a week
print("Ok, now generating a week graph.")
plotWeekArray = copy.copy(processedArray)
with tqdm(leave=True, unit= ' messages', total=lineNumber, desc="Preparing") as counter:
for line in plotWeekArray:
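            # (added note) 604800 s = one week, 345600 s = four days: the subtraction
            # rotates the week relative to the Unix epoch (a Thursday) so that hour 0
            # lines up with the start-of-week shown on the x-axis labels below.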
second = datetime.timedelta(seconds=int((line[0]-345600)%604800))
h = datetime.datetime(1,1,1)+second
line[0] = h.hour+((h.day-1)*24)
counter.update(1)
for row in plotWeekArray:
del row[1]
print("Generating graph...")
plotWeekFlat= [item for sublist in plotWeekArray for item in sublist]
plotWeekHourCount = [[x,plotWeekFlat.count(x)] for x in set(plotWeekFlat)]
r = np.asarray(plotWeekHourCount)
r = r[r[:,0].argsort()]
fig, ax = plt.subplots()
ax.plot(r[:,0],r[:,1])
ax.grid(True)
plt.xlabel("Day of Week (Starts Sunday 0000UTC)")
plt.ylabel("Messages")
plt.xticks([0,24,48,72,96,120,144,168],["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"])
plt.xlim([0,168])
plt.show()
quit()
def plotUsers(): #Plot the most active users
usersToPlot = 10
print("Ok, now generating a most active users graph. This could take minutes for large servers!")
plotUserArray = [line[1][:10] for line in processedArray]
plotUserArrayCount = [[x,plotUserArray.count(x)] for x in set(plotUserArray)]
plotUserArrayCount.sort(key=lambda x: x[1], reverse=True)
plotUserArrayCount = plotUserArrayCount[:usersToPlot]
fig, ax = plt.subplots()
users = [line[0] for line in plotUserArrayCount]
frequency = [line[1] for line in plotUserArrayCount]
y_pos = np.arange(len(users))
plt.bar(y_pos, frequency, align='center', alpha=0.5)
plt.xlim([min(y_pos)-0.5, max(y_pos)+0.5])
plt.xticks(y_pos, users)
plt.ylabel('Messages')
ax.grid(True)
plt.show()
quit()
parser = argparse.ArgumentParser(description='Discord channel imager. Remember to scrape using scrape.py first!')
requiredNamed = parser.add_argument_group('Required arguments')
requiredNamed.add_argument('-i', '--input', type=str, help='Textfile source. Must be unaltered output from scrape.py.', required=True)
optional = parser.add_argument_group('Plotting arguments, pick one')
optional.add_argument('-l', '--graphlong', action='store_true', help='Graph a long-term graph')
optional.add_argument('-w', '--graphweek', action='store_true', help='Graph a messages per hour over a weekday')
optional.add_argument('-a', '--graphusers', action='store_true', help='Graph the most active users.')
kw = parser.add_argument_group('Graph modifications')
kw.add_argument('-s', '--search', type=str, default='None', help='Search and only plot specific phrase.')
kw.add_argument('-u', '--usersearch', type=str, default='None', help='Search and only plot a specific user.')
args = parser.parse_args()
if not args.graphlong and not args.graphweek and not args.graphusers:
print("No graph picked for plotting. Aborting.")
quit()
if args.graphusers and args.usersearch != "None":
print("Cannot plot most active users and filter for a specific user at the same time. Aborting.")
quit()
with open(args.input, 'r') as textfile:
textfileArray = [line.strip() for line in textfile]
lineNumber = len(textfileArray)
print("Opened file.")
processedArray = []
with tqdm(leave=True,unit=' messages', total=lineNumber, desc="Processing - Stage 1") as counter:
for line in textfileArray:
timestamp, name, message = line.split(" - ")[:3]
if args.search != "None":
processedArray.append([timestamp, name, message])
else:
processedArray.append([timestamp, name])
counter.update(1)
if args.search != "None":
processedArraySearch = []
print("Filtering keywords...")
processedArraySearch = [line for line in processedArray if args.search in line[2]]
for row in processedArraySearch:
del row[2]
processedArray.clear()
processedArray = copy.copy(processedArraySearch)
lineNumber = len(processedArray)
processedArraySearch.clear()
if args.usersearch != "None":
processedArraySearch = []
print("Filtering users...")
processedArraySearch = [line for line in processedArray if args.usersearch in line[1]]
    for row in processedArraySearch:
        if len(row) > 2:  # guard: rows may no longer carry a third (message) column here
            del row[2]
processedArray.clear()
processedArray = copy.copy(processedArraySearch)
lineNumber = len(processedArray)
processedArraySearch.clear()
if len(processedArray) == 0:
print("Nothing found... Aborting.")
quit()
with tqdm(leave=True,unit=' messages', total=lineNumber, desc="Processing - Stage 2") as counter:
for line in processedArray:
line[0] = line[0][2:] #Get rid of the pesky b' at the front
stampSplitted = line[0].split(" ")
dateSplitted = stampSplitted[0].split("-") #dateSplitted is year,month,day
timeSplitted = stampSplitted[1].split(":") #timeSplitted is hour,minute,second
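        # (added note) a seconds field such as "07.123456" is 9 characters long;
        # drop the fractional part so the int() conversion below succeeds.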
if len(timeSplitted[2]) == 9:
timeSplitted[2] = timeSplitted[2][:-7]
else:
pass
dateSplitted = [int(x) for x in dateSplitted]
timeSplitted = [int(x) for x in timeSplitted]
dateString = datetime.date(dateSplitted[0],dateSplitted[1],dateSplitted[2])
timeString = datetime.time(timeSplitted[0], timeSplitted[1], timeSplitted[2])
combined = datetime.datetime.combine(dateString,timeString)
line[0] = time.mktime(combined.timetuple())
counter.update(1)
if args.graphlong:
plotLong()
elif args.graphweek:
plotWeekHour()
elif args.graphusers:
plotUsers()
| mit |
DataViva/dataviva-scripts | scripts/attrs/get_proximity.py | 1 | 3856 | ''' Import statements '''
import os, sys, click, MySQLdb
import pandas as pd
import pandas.io.sql as sql
import numpy as np
'''
USAGE:
python scripts/attrs/get_proximity.py -y 2014 -a cnae -i bra -o data/attr -t rais_ybi -c num_emp
'''
''' Load product space calculations lib '''
file_path = os.path.dirname(os.path.realpath(__file__))
ps_calcs_lib_path = os.path.abspath(os.path.join(file_path, "../../", "lib/ps_calcs"))
sys.path.insert(0, ps_calcs_lib_path)
import ps_calcs
''' Connect to DB '''
db = MySQLdb.connect(host=os.environ["DATAVIVA_DB_HOST"], user=os.environ["DATAVIVA_DB_USER"],
passwd=os.environ["DATAVIVA_DB_PW"],
db=os.environ["DATAVIVA_DB_NAME"])
db.autocommit(1)
cursor = db.cursor()
depths = {
"bra": [1, 3, 5, 7, 9],
"hs": [2, 6],
"wld": [2, 5],
"cnae": [1, 3, 6],
"cbo": [1, 4],
"course_sc": [2, 5],
"course_hedu": [2, 6],
"university": [5],
}
attr_types = ['bra', 'hs', 'wld', 'cnae', 'cbo', 'course_sc', 'course_hedu', 'university']
def get_years(year):
if "," not in year and "-" not in year:
return [int(year)]
if "-" in year:
start, end = year.split("-")
return range(int(start), int(end)+1)
if "," in year:
return [int(y) for y in year.split(",")]
@click.command()
@click.option('-y', '--year', prompt='Year', help='year of the data to convert', required=True)
@click.option('output_path', '--output', '-o', help='Path to save files to.', type=click.Path(), required=True, prompt="Output path")
# @click.option('attr', '--attr', '-a', click.Choice(attr_types), required=True, prompt=True)
@click.option('-a', '--attr', type=click.Choice(attr_types), required=True, prompt='Attr')
@click.option('-i', '--i_attr', type=click.Choice(attr_types), required=True, prompt='Intermediate Attr')
@click.option('-t', '--table', required=True, prompt='DB Table')
@click.option('-c', '--column', required=True, prompt='DB Column')
def prox(year, output_path, attr, i_attr, table, column):
attr_depths = depths[attr]
i_attr_depths = depths[i_attr]
years = get_years(year)
for year in years:
print "year:", year
for i, depth in enumerate(attr_depths):
print attr, "depth:", depth
query = """
SELECT {0}_id, {1}_id, {2}
FROM {3}
WHERE year=%s
""".format(attr, i_attr, column, table)
if len(attr_depths) > 1:
query += " and {}_id_len={}".format(attr, depth)
if len(i_attr_depths) > 1:
query += " and {}_id_len={}".format(i_attr, i_attr_depths[-1])
if "secex" in table:
query += " and month=0"
data = sql.read_sql(query, db, params=[year])
data = data.pivot(index="{}_id".format(i_attr), columns="{}_id".format(attr), values=column)
rcas = ps_calcs.rca(data)
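            # (added note) binarize revealed comparative advantage (RCA): values >= 1 mark
            # an activity as "present" in a location, the 0/1 input that proximity() expects.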
rcas[rcas >= 1] = 1
rcas[rcas < 1] = 0
prox = ps_calcs.proximity(rcas)
prox = pd.DataFrame(prox.unstack(), columns=["{}_proximity".format(i_attr)])
prox["year"] = year
prox = prox.set_index("year", append=True)
output_path_w_year = os.path.abspath(os.path.join(output_path, str(year)))
if not os.path.exists(output_path_w_year): os.makedirs(output_path_w_year)
fp = os.path.join(output_path_w_year, "{}_{}_proximity.tsv".format(attr, i_attr))
file_mode = 'a' if i else 'w'
user_header = False if i else True
with open(fp, file_mode) as f:
prox.to_csv(f, header=user_header, sep="\t")
if __name__ == "__main__":
prox()
| mit |
InnovArul/codesmart | Assignments/Jan-May-2018/GeometryAndPhotometryBasedComputerVision/homework2/utils.py | 1 | 8601 | import numpy as np
import matplotlib.pyplot as plt
import time, cv2, math
from skimage import transform, io, img_as_float
matinverse = np.linalg.inv
def show_image(image):
dpi = 80
figsize = (image.shape[1]/float(dpi), image.shape[0]/float(dpi))
fig = plt.figure(figsize=figsize);
plt.imshow(image);
fig.show()
def read_image(img_path):
return img_as_float(io.imread(img_path))
def get_arranged_point(point):
return [[point[0], point[1], 1]]
def get_normalized_coord(point):
return [point[0]/point[2], point[1]/point[2], 1]
def get_line(point1, point2):
#print((point1, point2))
return np.cross(get_normalized_coord(point1), get_normalized_coord(point2))
def get_intersection_point(line1, line2):
return np.cross(get_normalized_coord(line1), get_normalized_coord(line2))
def get_lines(line_points, img_shape):
height, width, channels = img_shape
normalized_points = []
for point in line_points:
normalized_points += get_arranged_point(point)
print(normalized_points)
line1 = get_normalized_coord(get_line(normalized_points[0], normalized_points[1]))
line2 = get_normalized_coord(get_line(normalized_points[2], normalized_points[3]))
return line1, line2
def get_homogeneous_coords(points):
return [get_normalized_coord(point) for point in points]
def nullspace(A, atol=1e-13, rtol=0):
"""Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : ndarray
A should be at most 2-D. A 1-D array with length k will be treated
as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
Return value
------------
ns : ndarray
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in numpy.dot(A, ns) will be approximately
zero.
"""
A = np.atleast_2d(A)
u, s, vh = np.linalg.svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj()[0]
print(ns)
return ns
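# Hedged sketch (added; not part of the upstream file): a tiny self-check for nullspace()
# on a rank-deficient matrix. The helper name _nullspace_demo is ours. It is wrapped in a
# function so importing this module does not run it; call it explicitly to try it out.
def _nullspace_demo():
    A = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])  # rank 1, so the nullspace is two-dimensional
    ns = nullspace(A)                # one basis vector of that nullspace
    return np.allclose(np.dot(A, ns), 0.0)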
def getHomographyMatrix(img1Points, img2Points):
A = np.empty(shape=[0, 9])
# matrix
#
# [ x y 1 0 0 0 -xx' -yx' -x'] = [0]
# [ 0 0 0 x y 1 -xy' -yy' -y'] = [0]
# .
# .
# here x1 corresponds to actual image points,
# x2 corresponds to virtual points assumed for the problem
for index in range(len(img1Points)):
x1 = img1Points[index][0]
y1 = img1Points[index][1]
x2 = img2Points[index][0]
y2 = img2Points[index][1]
A = np.append(A, [[x1, y1, 1, 0, 0, 0, -x1 * x2, -y1 * x2, -x2]], axis=0);
A = np.append(A, [[ 0, 0, 0, x1, y1, 1, -x1 * y2, -y1 * y2, -y2]], axis=0);
U, D, Vt = np.linalg.svd(A)
H = np.linalg.inv(Vt)[:, -1]
H = np.reshape(H, (3,3))
# print the homography matrix
print(H)
print(matinverse(H))
return H
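# Hedged sketch (added; not part of the upstream file): exercising getHomographyMatrix on
# four synthetic correspondences related by a pure translation of (10, 5). The DLT result
# is only defined up to scale, so the mapped point is re-normalized before inspection.
# The helper name _homography_demo is ours.
def _homography_demo():
    src = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
    dst = [[x + 10.0, y + 5.0] for x, y in src]
    H = getHomographyMatrix(src, dst)
    p = np.dot(H, np.array([0.5, 0.5, 1.0]))
    return p / p[2]  # expected to be close to [10.5, 5.5, 1.0]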
def getHomographedSourceImage(img, H, sourceSize):
# get the dimension of the image
height, width, channels = sourceSize
sourceImage = np.zeros((height, width, channels), np.uint8)
Hinv = np.linalg.inv(H)
for i in range(height):
for j in range(width):
iSourceCoord = np.dot(Hinv, np.array([[j], [i], [1]]));
iSourceX = (iSourceCoord[0] / iSourceCoord[2])[0];
iSourceY = (iSourceCoord[1] / iSourceCoord[2])[0];
            # if the resultant coordinates are not integers, bilinearly interpolate the pixels
# bilinearInterpolate considers 2nd input as along height and 3rd input as along width
sourceImage[i, j] = bilinearInterpolate(img, iSourceY, iSourceX)
return sourceImage
def get_normalization_matrix(img_shape, is_normalize):
if not is_normalize:
return np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
else:
norm_const = np.max(img_shape)
return np.array([[2/(norm_const-1), 0, -1],
[0, 2/(norm_const-1), -1],
[0, 0, 1/np.sqrt(3)]])
def get_points_from_gui(num_points, fig, img, is_close_at_end):
global coords
coords = []
if img is not None:
fig = plt.figure()
plt.imshow(img)
# click event handler for the plt figure
def onclick(event):
global ix, iy
ix, iy = event.xdata, event.ydata
print('x = %d, y = %d'%(ix, iy))
plt.plot([ix], [iy], marker='o', markersize=3, color="red")
global coords
coords.append((ix, iy))
fig.canvas.draw()
if len(coords) == num_points:
fig.canvas.mpl_disconnect(cid)
time.sleep(1)
plt.close(1)
return coords
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
return coords, fig
def bilinearInterpolate(img, x, y):
# get the dimension of the image
height, width = img.shape[:2]
if(x < 0 or x > height - 1):
return np.zeros(3);
if(y < 0 or y > width - 1):
return np.zeros(3);
modX = x % 1;
modY = y % 1;
if((modX == 0) and (modY == 0)):
        value = img[int(x), int(y)]  # x and y are whole numbers here; index with ints
else:
# bilinear interpolation
# find four corners
topLeft = img[int(math.floor(x)), int(math.floor(y))];
topRight = img[int(math.floor(x)), int(math.ceil(y))];
bottomLeft = img[int(math.ceil(x)), int(math.floor(y))];
bottomRight = img[int(math.ceil(x)), int(math.ceil(y))];
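        # (added note) standard bilinear weights: each corner is weighted by the area of the
        # opposite sub-rectangle, e.g. (1 - modX) * (1 - modY) for the top-left corner.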
value = ((1 - modX) * (1 - modY) * topLeft) + \
((1 - modX) * modY * topRight) + \
(modX * (1 - modY) * bottomLeft) + \
(modX * modY * bottomRight);
return value
def rotateImage(img, theta):
# get the dimension of the image
height, width = img.shape
rotatedImage = np.zeros((height, width), np.uint8)
thetaR = math.radians(theta)
for i in range(height):
for j in range(width):
newCoordX = i - (height / 2);
newCoordY = j - (width / 2);
iSourceX = (newCoordX * math.cos(thetaR)) + (newCoordY * math.sin(thetaR));
iSourceY = (-newCoordX * math.sin(thetaR)) + (newCoordY * math.cos(thetaR));
            # re-add the centering offset that was subtracted above
iSourceX = iSourceX + (height / 2);
iSourceY = iSourceY + (width / 2);
            # if the resultant coordinates are not integers, bilinearly interpolate the pixels
rotatedImage[i, j] = bilinearInterpolate(img, iSourceX, iSourceY)
return rotatedImage
def scaleImage(img, sx, sy):
# get the dimension of the image
height, width = img.shape
outHeight = height; # int(math.ceil(sx * height));
outWidth = width; # int(math.ceil(sy * width))
scaledImage = np.zeros((outHeight, outWidth), np.uint8)
for i in range(outHeight):
for j in range(outWidth):
newCoordX = i - (outHeight / 2);
newCoordY = j - (outWidth / 2);
iSourceX = (newCoordX / sx);
iSourceY = (newCoordY / sy);
            # re-apply the centering translation in source-image space
iSourceX = iSourceX + (height / 2);
iSourceY = iSourceY + (width / 2);
            # if the resultant coordinates are not integers, bilinearly interpolate the pixels
scaledImage[i, j] = bilinearInterpolate(img, iSourceX, iSourceY)
return scaledImage
def translateImage(img, tx, ty):
# get the dimension of the image
height, width = img.shape
translatedImage = np.zeros((height, width), np.uint8)
for i in range(height):
for j in range(width):
iSourceX = i - tx;
iSourceY = j - ty;
            # if the resultant coordinates are not integers, bilinearly interpolate the pixels
translatedImage[i, j] = bilinearInterpolate(img, iSourceX, iSourceY)
return translatedImage
| gpl-2.0 |
q1ang/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
cybernet14/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
BenjaminBossan/nolearn | nolearn/grid_search.py | 9 | 2364 | """:func:`grid_search` is a wrapper around
:class:`sklearn.grid_search.GridSearchCV`.
:func:`grid_search` adds a printed report to the standard
:class:`GridSearchCV` functionality, so you know about the best score
and parameters.
Usage example:
.. doctest::
>>> import numpy as np
>>> class Dataset:
... def __init__(self, data, target):
... self.data, self.target = data, target
...
>>> from sklearn.linear_model import LogisticRegression
>>> data = np.array([[1, 2, 3], [3, 3, 3]] * 20)
>>> target = np.array([0, 1] * 20)
>>> dataset = Dataset(data, target)
>>> model = LogisticRegression()
>>> parameters = dict(C=[1.0, 3.0])
>>> grid_search(dataset, model, parameters) # doctest: +ELLIPSIS
parameters:
{'C': [1.0, 3.0]}
...
Best score: 1.0000
Best grid parameters:
C=1.0,
...
"""
from __future__ import print_function
from pprint import pprint
import warnings
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
warnings.warn("""\
The nolearn.grid_search module will be removed in nolearn 0.6. If you want to
continue to use this module, please consider copying the code into
your own project.
""")
def print_report(grid_search, parameters):
print()
print("== " * 20)
print("All parameters:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name, value in sorted(best_parameters.items()):
if not isinstance(value, BaseEstimator):
print(" %s=%r," % (param_name, value))
print()
print("== " * 20)
print("Best score: %0.4f" % grid_search.best_score_)
print("Best grid parameters:")
for param_name in sorted(parameters.keys()):
print(" %s=%r," % (param_name, best_parameters[param_name]))
print("== " * 20)
return grid_search
def grid_search(dataset, clf, parameters, cv=None, verbose=4, n_jobs=1,
**kwargs):
# See http://scikit-learn.org/stable/modules/grid_search.html
grid_search = GridSearchCV(
clf,
parameters,
cv=cv,
verbose=verbose,
n_jobs=n_jobs,
**kwargs
)
if verbose:
print("parameters:")
pprint(parameters)
grid_search.fit(dataset.data, dataset.target)
if verbose:
print_report(grid_search, parameters)
return grid_search
| mit |
ycaihua/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
ky822/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
nosarcasm/philoexplorer | nodes_active.py | 1 | 1652 | #!/usr/bin/env python
import web
import json
import math
import numpy as np
import scipy as sp
import pandas as pd
import networkx as nx
import datetime
urls = ('/','root')
app = web.application(urls,globals())
class root:
def __init__(self):
self.hello = "hello world"
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
output = dict()
getInput = web.input(start='2012-3-03 16:00:00', end='2012-3-03 21:00:00')
start_time=pd.to_datetime(getInput.start).tz_localize('US/Eastern') - pd.DateOffset(hours=10)
end_time=pd.to_datetime(getInput.end).tz_localize('US/Eastern') - pd.DateOffset(hours=10)
output_nodes = set()
all_schedules = pd.read_json('all_schedules.json')
allnodes = pd.read_json('allnodes.json')
nodes = set(allnodes.nodes)
all_schedules['end'] = all_schedules['end'].map(lambda x: datetime.datetime.fromtimestamp(x/1000000000))
all_schedules['start'] = all_schedules['start'].map(lambda x: datetime.datetime.fromtimestamp(x/1000000000))
night_sched = all_schedules[(all_schedules.start >= start_time) & (all_schedules.end <= end_time)]
on_nodes = set()
for idx,show in night_sched.iterrows():
on_nodes.add(show[2])
off_nodes = nodes.difference(on_nodes)
imported_graph = nx.read_gexf('./finished_network3.gexf')
for i in off_nodes:
try:
imported_graph.remove_node(i)
except:
continue
pr=nx.pagerank(imported_graph,alpha=0.9,weight='newweight',tol=.01, max_iter=200)
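        # (added note) PageRank scores sum to 1 over the nodes; the * 1000000 below only
        # rescales them to larger magnitudes for the JSON consumer, presumably for display.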
output['nodes'] = [(i,v*1000000) for i,v in pr.items()]
output['input_params'] = getInput
return json.dumps(output)
if __name__ == "__main__":
app.run()
| gpl-3.0 |
cni/MRS | MRS/version.py | 2 | 1899 | """MRS version/release information"""
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
description = "MRS analysis software"
long_description = """
The STANFORD CNI MRS ANALYSIS LIBRARY (SMAL)
--------------------------------------------
This library contains implementations of analysis of data acquired in
magnetic resonance spectroscopy experiments (MRS).
Copyright (c) 2013-, Ariel Rokem, Grace Tang.
Stanford University.
All rights reserved.
"""
NAME = "MRS"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://cni.github.io/MRS"
DOWNLOAD_URL = "http://github.com/arokem/MRS"
LICENSE = "MIT"
AUTHOR = "Ariel Rokem, Grace Tang"
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['MRS', 'MRS.leastsqbound', 'MRS.tests']
BIN = 'bin/'
PACKAGE_DATA = {"MRS": ["LICENSE"]}
REQUIRES = ["numpy", "matplotlib", "scipy",
"nibabel", "nipy", "nitime", "nipype"]
| mit |
akionakamura/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
kose-y/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
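            # (added note) trailing running mean over one full test-set pass
            # (len(self.test_batch_range) batches) to smooth the plotted test curve.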
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
jamesrp/pyeq2 | Examples/GUI/wxPythonFit.py | 2 | 10814 | import os, sys, cPickle
import wx # ensure this import works before starting the application
import matplotlib # ensure this import works before starting the application
# ensure pyeq2 can be imported before starting the application
if -1 != sys.path[0].find('pyeq2-master'):raise Exception('Please rename git checkout directory from "pyeq2-master" to "pyeq2"')
exampleFileDirectory = sys.path[0][:sys.path[0].rfind(os.sep)]
pyeq2IimportDirectory = os.path.join(os.path.join(exampleFileDirectory, '..'), '..')
if pyeq2IimportDirectory not in sys.path:
sys.path.append(pyeq2IimportDirectory)
import pyeq2
# local imports from application subdirectory
import guifiles.icon as icon
import guifiles.DataForControls as dfc
import guifiles.CustomDialogs as CustomDialogs
import guifiles.CustomEvents as CustomEvents
import guifiles.CustomThreads as CustomThreads
class ApplicationFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="Example wxPython Curve And Surface Fitter",
size=(800,600))
# wx converted an icon file to a Python file for embedding here, see icon.py file
self.SetIcon(icon.icon.GetIcon())
p = wx.Panel(self) # something to put the controls on
# create the controls
# no need to use "self." as these are not referenced by other methods
label1 = wx.StaticText(p, -1, "--- 2D Data Text Editor ---")
label2 = wx.StaticText(p, -1, "--- 3D Data Text Editor ---")
# use "self" because of references in other methods
self.text_2D = wx.TextCtrl(p, -1, dfc.exampleText_2D,
style=wx.TE_MULTILINE|wx.HSCROLL)
self.text_3D = wx.TextCtrl(p, -1, dfc.exampleText_3D,
style=wx.TE_MULTILINE|wx.HSCROLL)
# use "self" because of references in other methods
self.rbFittingTargetChoice_2D = wx.RadioBox(
p, -1, "Fitting Target 2D", wx.DefaultPosition, wx.DefaultSize,
dfc.fittingTargetList, 1, wx.RA_SPECIFY_COLS
)
self.rbFittingTargetChoice_3D = wx.RadioBox(
p, -1, "Fitting Target 3D", wx.DefaultPosition, wx.DefaultSize,
dfc.fittingTargetList, 1, wx.RA_SPECIFY_COLS
)
# use "self" because of references in other methods
self.rbEqChoice_2D = wx.RadioBox(
p, -1, "Example 2D Equations", wx.DefaultPosition, wx.DefaultSize,
dfc.exampleEquationList_2D, 1, wx.RA_SPECIFY_COLS
)
self.rbEqChoice_3D = wx.RadioBox(
p, -1, "Example 3D Equations", wx.DefaultPosition, wx.DefaultSize,
dfc.exampleEquationList_3D, 1, wx.RA_SPECIFY_COLS
)
# use "self" because of references in other methods
self.btnFit2D = wx.Button(p, -1, "Fit 2D Text Data")
self.btnFit3D = wx.Button(p, -1, "Fit 3D Text Data")
# setup the layout with grid sizer
fgs = wx.FlexGridSizer(5, 2, 10, 20)
fgs.AddGrowableRow(1)
fgs.AddGrowableCol(0)
fgs.AddGrowableCol(1)
fgs.Add(label1, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(label2, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.text_2D, 0, wx.EXPAND)
fgs.Add(self.text_3D, 0, wx.EXPAND)
fgs.Add(self.rbEqChoice_2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.rbEqChoice_3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.rbFittingTargetChoice_2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.rbFittingTargetChoice_3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.btnFit2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
fgs.Add(self.btnFit3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
border = wx.BoxSizer()
border.Add(fgs, 1, wx.EXPAND|wx.ALL, 10)
p.SetSizer(border)
# all controls on the main panel have been added with sizers,
# now center the application window on the user's display
self.Center()
# this dialog will not be displayed unless fitting is in progress
# use "self" because of references in other methods
self.statusBox = CustomDialogs.StatusDialog(self, '', "Status")
# Bind the button events to their application methods
self.Bind(wx.EVT_BUTTON, self.OnFit2D, self.btnFit2D)
self.Bind(wx.EVT_BUTTON, self.OnFit3D, self.btnFit3D)
# Set up event handler for any worker thread results
CustomEvents.EVT_THREADSTATUS(self, self.OnThreadStatus)
self.fittingWorkerThread = None
def OnThreadStatus(self, event):
if type(event.data) == type(''): # strings are status updates
self.statusBox.text.AppendText(event.data + "\n")
else: # not string data type, the worker thread completed
self.fittingWorkerThread = None
# event.data will be the fitted equation
pickledEquationFile = open("pickledEquationFile", "wb")
cPickle.dump(event.data, pickledEquationFile)
pickledEquationFile.close()
self.btnFit2D.Enable()
self.btnFit3D.Enable()
self.statusBox.Hide()
currentDirectory = os.path.dirname(os.path.abspath(__file__))
dialogDirectory = os.path.join(currentDirectory, 'guifiles')
commandString = os.path.join(dialogDirectory, 'CustomDialogs.py')
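            # (added note) the results window runs as a separate process; it is expected to
            # load the pickled equation written above and display the fitted model.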
os.popen(sys.executable + ' ' + commandString)
def OnFit2D(self, evt):
textData = str(self.text_2D.GetValue())
equationSelection = self.rbEqChoice_2D.GetStringSelection()
fittingTargetSelection = self.rbFittingTargetChoice_2D.GetStringSelection()
# the GUI's fitting target string contains what we need - extract it
fittingTarget = fittingTargetSelection.split('(')[1].split(')')[0]
if equationSelection == 'Linear Polynomial':
self.equation = pyeq2.Models_2D.Polynomial.Linear(fittingTarget)
if equationSelection == 'Quadratic Polynomial':
self.equation = pyeq2.Models_2D.Polynomial.Quadratic(fittingTarget)
if equationSelection == 'Cubic Polynomial':
self.equation = pyeq2.Models_2D.Polynomial.Cubic(fittingTarget)
if equationSelection == 'Witch Of Maria Agnesi A':
self.equation = pyeq2.Models_2D.Miscellaneous.WitchOfAgnesiA(fittingTarget)
if equationSelection == 'VanDeemter Chromatography':
self.equation = pyeq2.Models_2D.Engineering.VanDeemterChromatography(fittingTarget)
if equationSelection == 'Gamma Ray Angular Distribution (degrees) B':
self.equation = pyeq2.Models_2D.LegendrePolynomial.GammaRayAngularDistributionDegreesB(fittingTarget)
if equationSelection == 'Exponential With Offset':
self.equation = pyeq2.Models_2D.Exponential.Exponential(fittingTarget, 'Offset')
# convert text to numeric data checking for log of negative numbers, etc.
try:
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(textData, self.equation, False)
except:
wx.MessageBox(self.equation.reasonWhyDataRejected, "Error")
return
# check for number of coefficients > number of data points to be fitted
coeffCount = len(self.equation.GetCoefficientDesignators())
dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
wx.MessageBox("This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".", "Error")
return
# Now the status dialog is used. Disable fitting buttons until thread completes
self.btnFit2D.Disable()
self.btnFit3D.Disable()
self.statusBox.text.SetValue('')
self.statusBox.Show() # hidden by OnThreadStatus() when thread completes
        # thread will automatically start to run
self.fittingWorkerThread = CustomThreads.FittingThread(self, self.equation)
def OnFit3D(self, evt):
textData = str(self.text_3D.GetValue())
equationSelection = self.rbEqChoice_3D.GetStringSelection()
fittingTargetSelection = self.rbFittingTargetChoice_3D.GetStringSelection()
# the GUI's fitting target string contains what we need - extract it
fittingTarget = fittingTargetSelection.split('(')[1].split(')')[0]
if equationSelection == 'Linear Polynomial':
self.equation = pyeq2.Models_3D.Polynomial.Linear(fittingTarget)
if equationSelection == 'Full Quadratic Polynomial':
self.equation = pyeq2.Models_3D.Polynomial.FullQuadratic(fittingTarget)
if equationSelection == 'Full Cubic Polynomial':
self.equation = pyeq2.Models_3D.Polynomial.FullCubic(fittingTarget)
if equationSelection == 'Monkey Saddle A':
self.equation = pyeq2.Models_3D.Miscellaneous.MonkeySaddleA(fittingTarget)
if equationSelection == 'Gaussian Curvature Of Whitneys Umbrella A':
self.equation = pyeq2.Models_3D.Miscellaneous.GaussianCurvatureOfWhitneysUmbrellaA(fittingTarget)
if equationSelection == 'NIST Nelson Autolog':
self.equation = pyeq2.Models_3D.NIST.NIST_NelsonAutolog(fittingTarget)
if equationSelection == 'Custom Polynomial One':
self.equation = pyeq2.Models_3D.Polynomial.UserSelectablePolynomial(fittingTarget, "Default", 3, 1)
# convert text to numeric data checking for log of negative numbers, etc.
try:
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(textData, self.equation, False)
except:
wx.MessageBox(self.equation.reasonWhyDataRejected, "Error")
return
# check for number of coefficients > number of data points to be fitted
coeffCount = len(self.equation.GetCoefficientDesignators())
dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
wx.MessageBox("This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".", "Error")
return
# Now the status dialog is used. Disable fitting buttons until thread completes
self.btnFit2D.Disable()
self.btnFit3D.Disable()
self.statusBox.text.SetValue('')
self.statusBox.Show() # hidden by OnThreadStatus() when thread completes
# thread will automatically start to run
self.fittingWorkerThread = CustomThreads.FittingThread(self, self.equation)
if __name__ == "__main__":
app = wx.App()
frm = ApplicationFrame()
frm.Show()
app.MainLoop()
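# Hedged sketch: the same pyeq2 calls the OnFit handlers above make before
# delegating to CustomThreads.FittingThread, shown outside of wx. The sample
# data and the 'SSQABS' fitting target are illustrative assumptions.
def _headless_fit_sketch():
    import pyeq2
    equation = pyeq2.Models_2D.Polynomial.Linear('SSQABS')
    text_data = "1.0 2.1\n2.0 3.9\n3.0 6.2\n4.0 8.1"
    # same conversion call the GUI uses on the text control contents
    pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(text_data, equation, False)
    coeff_count = len(equation.GetCoefficientDesignators())
    data_count = len(equation.dataCache.allDataCacheDictionary['DependentData'])
    if coeff_count > data_count:
        raise ValueError("this equation needs at least %d data points" % coeff_count)
    return equation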
| bsd-2-clause |
AtsushiSakai/PythonRobotics | PathPlanning/RRT/rrt.py | 1 | 7202 | """
Path planning Sample Code with Randomized Rapidly-Exploring Random Trees (RRT)
author: AtsushiSakai(@Atsushi_twi)
"""
import math
import random
import matplotlib.pyplot as plt
import numpy as np
show_animation = True
class RRT:
"""
Class for RRT planning
"""
class Node:
"""
RRT Node
"""
def __init__(self, x, y):
self.x = x
self.y = y
self.path_x = []
self.path_y = []
self.parent = None
def __init__(self,
start,
goal,
obstacle_list,
rand_area,
expand_dis=3.0,
path_resolution=0.5,
goal_sample_rate=5,
max_iter=500):
"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
randArea:Random Sampling Area [min,max]
"""
self.start = self.Node(start[0], start[1])
self.end = self.Node(goal[0], goal[1])
self.min_rand = rand_area[0]
self.max_rand = rand_area[1]
self.expand_dis = expand_dis
self.path_resolution = path_resolution
self.goal_sample_rate = goal_sample_rate
self.max_iter = max_iter
self.obstacle_list = obstacle_list
self.node_list = []
def planning(self, animation=True):
"""
rrt path planning
animation: flag for animation on or off
"""
self.node_list = [self.start]
for i in range(self.max_iter):
rnd_node = self.get_random_node()
nearest_ind = self.get_nearest_node_index(self.node_list, rnd_node)
nearest_node = self.node_list[nearest_ind]
new_node = self.steer(nearest_node, rnd_node, self.expand_dis)
if self.check_collision(new_node, self.obstacle_list):
self.node_list.append(new_node)
if animation and i % 5 == 0:
self.draw_graph(rnd_node)
if self.calc_dist_to_goal(self.node_list[-1].x,
self.node_list[-1].y) <= self.expand_dis:
final_node = self.steer(self.node_list[-1], self.end,
self.expand_dis)
if self.check_collision(final_node, self.obstacle_list):
return self.generate_final_course(len(self.node_list) - 1)
if animation and i % 5:
self.draw_graph(rnd_node)
return None # cannot find path
def steer(self, from_node, to_node, extend_length=float("inf")):
new_node = self.Node(from_node.x, from_node.y)
d, theta = self.calc_distance_and_angle(new_node, to_node)
new_node.path_x = [new_node.x]
new_node.path_y = [new_node.y]
if extend_length > d:
extend_length = d
n_expand = math.floor(extend_length / self.path_resolution)
for _ in range(n_expand):
new_node.x += self.path_resolution * math.cos(theta)
new_node.y += self.path_resolution * math.sin(theta)
new_node.path_x.append(new_node.x)
new_node.path_y.append(new_node.y)
d, _ = self.calc_distance_and_angle(new_node, to_node)
if d <= self.path_resolution:
new_node.path_x.append(to_node.x)
new_node.path_y.append(to_node.y)
new_node.x = to_node.x
new_node.y = to_node.y
new_node.parent = from_node
return new_node
def generate_final_course(self, goal_ind):
path = [[self.end.x, self.end.y]]
node = self.node_list[goal_ind]
while node.parent is not None:
path.append([node.x, node.y])
node = node.parent
path.append([node.x, node.y])
return path
def calc_dist_to_goal(self, x, y):
dx = x - self.end.x
dy = y - self.end.y
return math.hypot(dx, dy)
def get_random_node(self):
if random.randint(0, 100) > self.goal_sample_rate:
rnd = self.Node(
random.uniform(self.min_rand, self.max_rand),
random.uniform(self.min_rand, self.max_rand))
else: # goal point sampling
rnd = self.Node(self.end.x, self.end.y)
return rnd
def draw_graph(self, rnd=None):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
if rnd is not None:
plt.plot(rnd.x, rnd.y, "^k")
for node in self.node_list:
if node.parent:
plt.plot(node.path_x, node.path_y, "-g")
for (ox, oy, size) in self.obstacle_list:
self.plot_circle(ox, oy, size)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.end.x, self.end.y, "xr")
plt.axis("equal")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
@staticmethod
def plot_circle(x, y, size, color="-b"): # pragma: no cover
deg = list(range(0, 360, 5))
deg.append(0)
xl = [x + size * math.cos(np.deg2rad(d)) for d in deg]
yl = [y + size * math.sin(np.deg2rad(d)) for d in deg]
plt.plot(xl, yl, color)
@staticmethod
def get_nearest_node_index(node_list, rnd_node):
dlist = [(node.x - rnd_node.x)**2 + (node.y - rnd_node.y)**2
for node in node_list]
minind = dlist.index(min(dlist))
return minind
@staticmethod
def check_collision(node, obstacleList):
if node is None:
return False
for (ox, oy, size) in obstacleList:
dx_list = [ox - x for x in node.path_x]
dy_list = [oy - y for y in node.path_y]
d_list = [dx * dx + dy * dy for (dx, dy) in zip(dx_list, dy_list)]
if min(d_list) <= size**2:
return False # collision
return True # safe
@staticmethod
def calc_distance_and_angle(from_node, to_node):
dx = to_node.x - from_node.x
dy = to_node.y - from_node.y
d = math.hypot(dx, dy)
theta = math.atan2(dy, dx)
return d, theta
def main(gx=6.0, gy=10.0):
print("start " + __file__)
# ====Search Path with RRT====
obstacleList = [(5, 5, 1), (3, 6, 2), (3, 8, 2), (3, 10, 2), (7, 5, 2),
(9, 5, 2), (8, 10, 1)] # [x, y, radius]
# Set Initial parameters
rrt = RRT(
start=[0, 0],
goal=[gx, gy],
rand_area=[-2, 15],
obstacle_list=obstacleList)
path = rrt.planning(animation=show_animation)
if path is None:
print("Cannot find path")
else:
print("found path!!")
# Draw final path
if show_animation:
rrt.draw_graph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.01) # Need for Mac
plt.show()
if __name__ == '__main__':
main()
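# Hedged sketch: a headless run of the planner defined above, complementing
# main(); animation is off and only the [x, y] waypoint list (or None) is
# returned. Start, goal and obstacle values are illustrative assumptions.
def _headless_rrt_example():
    planner = RRT(
        start=[0.0, 0.0],
        goal=[6.0, 10.0],
        rand_area=[-2, 15],
        obstacle_list=[(5, 5, 1), (3, 6, 2), (7, 5, 2)],
        expand_dis=2.0,
        goal_sample_rate=10,
        max_iter=1000)
    # planning() returns the path from goal back to start, or None on failure
    return planner.planning(animation=False)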
| mit |
neep305/swordfish | text_analysis/views.py | 1 | 1308 | from django.shortcuts import render
import pandas as pd
from text_analysis import fileutil
from text_analysis import dbutil
import csv
from konlpy.tag import Hannanum
from collections import Counter
# Create your views here.
def home_page(request, pid):
print("Hello World")
# read csv from local
result = fileutil.read_localcsv('data/livetalk.csv')
return render(request, 'home.html', context={'data':"hello world!",})
def viz_page(request):
print("Viz display")
return render(request, 'viz.html')
def nasdaq_page(request):
path = 'data/livetalk.csv'
# get csv data from local path
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
content = ''
for line in reader:
content += ' ' + line['BLTTHG_CNTNT']
# initiate Hannanum instance
h = Hannanum()
nouns = h.nouns(content)
c = Counter(nouns)
# insert data to mongodb
# connection = dbutil.get_mongo_connection()
# tbl = dbutil.get_mongo_connection(connection)
# tbl.insert_bulk_data()
# dbutil.insert_text_analysis_data(tbl,c)
# return render(request, 'nasdaq.html', context={'result':csv_data})
return render(request, 'nasdaq.html')
def nasdaq_viz_page(request):
return render(request, 'nasdaq_viz.html')
# view for d3example
def d3_example_page(request):
return render(request, 'd3example.html')
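# Hedged sketch: the noun counting performed in nasdaq_page(), reduced to its
# core; the Korean sample sentence is an illustrative assumption.
def _noun_count_example(text=u"삼성전자 주가가 오늘 크게 올랐다"):
    h = Hannanum()
    nouns = h.nouns(text)
    # most_common() returns (noun, frequency) pairs, highest frequency first
    return Counter(nouns).most_common(10)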
| mit |
lewislone/mStocks | gadget/sdhub/tushare/stock/billboard.py | 13 | 11969 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Daily billboard ("dragon-tiger list") data
Created on June 10, 2015
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from pandas.compat import StringIO
from tushare.stock import cons as ct
import numpy as np
import time
import re
import lxml.html
from lxml import etree
from tushare.util import dateu as du
from tushare.stock import ref_vars as rv
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def top_list(date = None, retry_count=3, pause=0.001):
"""
    Fetch the daily billboard ("dragon-tiger list") of stocks.
    Parameters
    --------
    date:string
                date of the detail data, format: YYYY-MM-DD; if empty, data for the most recent trading day is returned
    retry_count : int, default 3
                 number of retries when network or similar problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        pchange: price change (%)
        amount: billboard turnover (10k CNY)
        buy: buy amount (10k CNY)
        bratio: share of total turnover
        sell: sell amount (10k CNY)
        sratio: share of total turnover
        reason: reason for being listed
        date: date
"""
if date is None:
if du.get_hour() < 18:
date = du.last_tddate()
else:
date = du.today()
else:
if(du.is_holiday(date)):
return None
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'], date))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dt_1\"]")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr)[0]
df.columns = [i for i in range(1,12)]
df = df.apply(_f_rows, axis=1)
df = df.fillna(method='ffill')
df = df.drop([1, 4], axis=1)
df.columns = rv.LHB_COLS
df = df.drop_duplicates()
df['code'] = df['code'].astype(int)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['date'] = date
except:
pass
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def cap_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch statistics of individual stocks appearing on the billboard.
    Parameters
    --------
    days:int
              window in days; counts appearances over the last n days, default 5, other options are 10, 30 and 60
    retry_count : int, default 3
                 number of retries when network or similar problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        count: number of appearances on the billboard
        bamount: accumulated buy amount (10k CNY)
        samount: accumulated sell amount (10k CNY)
        net: net amount (10k CNY)
        bcount: number of buy seats
        scount: number of sell seats
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _cap_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
if df is not None:
df = df.drop_duplicates('code')
return df
def _cap_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[0],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_GGTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _cap_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def broker_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch statistics of brokerage branches appearing on the billboard.
    Parameters
    --------
    days:int
              window in days; counts appearances over the last n days, default 5, other options are 10, 30 and 60
    retry_count : int, default 3
                 number of retries when network or similar problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ---------
    broker: brokerage branch name
    count: number of appearances on the billboard
    bamount: accumulated buy amount (10k CNY)
    bcount: number of buy seats
    samount: accumulated sell amount (10k CNY)
    scount: number of sell seats
    top3: top three stocks bought
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _broker_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
return df
def _broker_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[1],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_YYTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _broker_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch tracking statistics for institutional seats.
    Parameters
    --------
    days:int
              window in days; counts appearances over the last n days, default 5, other options are 10, 30 and 60
    retry_count : int, default 3
                 number of retries when network or similar problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    --------
    code: stock code
    name: stock name
    bamount: accumulated buy amount (10k CNY)
    bcount: number of buys
    samount: accumulated sell amount (10k CNY)
    scount: number of sells
    net: net amount (10k CNY)
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _inst_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[2],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([2,3], axis=1)
df.columns = rv.LHB_JGZZ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_detail(retry_count= 3, pause= 0.001):
"""
    Fetch detailed institutional-seat trade statistics for the most recent trading day.
    Parameters
    --------
    retry_count : int, default 3
                 number of retries when network or similar problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ----------
    code: stock code
    name: stock name
    date: trade date
    bamount: institutional-seat buy amount (10k CNY)
    samount: institutional-seat sell amount (10k CNY)
    type: type of trade
"""
ct._write_head()
df = _inst_detail(pageNo=1, retry_count=retry_count,
pause=pause)
if len(df)>0:
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_detail(pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[3],
ct.PAGES['fd'], '', pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_JGMX_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_detail(pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def _f_rows(x):
if '%' in x[3]:
x[11] = x[6]
for i in range(6, 11):
x[i] = x[i-5]
for i in range(1, 6):
x[i] = np.NaN
return x
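# Hedged sketch: typical calls into the functions above. Dates and day
# windows are illustrative assumptions, and network access is required.
def _billboard_example():
    daily = top_list('2015-06-12')     # one day's billboard detail
    stocks = cap_tops(days=5)          # per-stock appearance statistics
    brokers = broker_tops(days=10)     # per-brokerage-branch statistics
    return daily, stocks, brokers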
| mit |
Winand/pandas | pandas/tests/indexes/datetimes/test_setops.py | 1 | 14470 | from datetime import datetime
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, date_range, Series, bdate_range, DataFrame,
Int64Index, Index, to_datetime)
from pandas.tseries.offsets import Minute, BMonthEnd, MonthEnd
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndex(object):
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
tm.assert_index_equal(result, expected)
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_freq_both_none(self):
# GH11086
expected = bdate_range('20150101', periods=10)
expected.freq = None
result = expected.union(expected)
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_union_dataframe_index(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
exp = pd.date_range('1/1/1980', '1/1/2012', freq='MS')
tm.assert_index_equal(df.index, exp)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_intersection(self):
# GH 4690 (with tz)
for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = date_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = date_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = DatetimeIndex([], name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
# non-monotonic
base = DatetimeIndex(['2011-01-05', '2011-01-04',
'2011-01-02', '2011-01-03'],
tz=tz, name='idx')
rng2 = DatetimeIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
tz=tz, name='idx')
expected2 = DatetimeIndex(
['2011-01-04', '2011-01-02'], tz=tz, name='idx')
rng3 = DatetimeIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
tz=tz, name='other')
expected3 = DatetimeIndex(
['2011-01-04', '2011-01-02'], tz=tz, name=None)
# GH 7880
rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz,
name='idx')
expected4 = DatetimeIndex([], tz=tz, name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq is None
assert result.tz == expected.tz
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
assert len(result) == 0
result = rng.intersection(rng[0:0])
assert len(result) == 0
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
assert len(result) == 0
def test_difference_freq(self):
# GH14323: difference of DatetimeIndex should not preserve frequency
index = date_range("20160920", "20160925", freq="D")
other = date_range("20160921", "20160924", freq="D")
expected = DatetimeIndex(["20160920", "20160925"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = date_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = DatetimeIndex(["20160920", "20160921"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
assert len(dti1.difference(dti2)) == 2
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
assert isinstance(result, DatetimeIndex)
assert result is result
result = dti.join(empty)
assert isinstance(result, DatetimeIndex)
def test_join_nonunique(self):
idx1 = to_datetime(['2012-11-06 16:00:11.477563',
'2012-11-06 16:00:11.477563'])
idx2 = to_datetime(['2012-11-06 15:11:09.006507',
'2012-11-06 15:11:09.006507'])
rs = idx1.join(idx2, how='outer')
assert rs.is_monotonic
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_union(self):
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_union = left.union(right)
assert isinstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_union = left.union(right)
assert isinstance(the_union, Index)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_union = left.union(right)
assert isinstance(the_union, DatetimeIndex)
# order does not matter
tm.assert_index_equal(right.union(left), the_union)
# overlapping, but different offset
rng = date_range(START, END, freq=BMonthEnd())
the_union = self.rng.union(rng)
assert isinstance(the_union, DatetimeIndex)
def test_outer_join(self):
# should just behave as union
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
# overlapping, but different offset
rng = date_range(START, END, freq=BMonthEnd())
the_join = self.rng.join(rng, how='outer')
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
def test_union_not_cacheable(self):
rng = date_range('1/1/2000', periods=50, freq=Minute())
rng1 = rng[10:]
rng2 = rng[:25]
the_union = rng1.union(rng2)
tm.assert_index_equal(the_union, rng)
rng1 = rng[10:]
rng2 = rng[15:35]
the_union = rng1.union(rng2)
expected = rng[10:]
tm.assert_index_equal(the_union, expected)
def test_intersection(self):
rng = date_range('1/1/2000', periods=50, freq=Minute())
rng1 = rng[10:]
rng2 = rng[:25]
the_int = rng1.intersection(rng2)
expected = rng[10:25]
tm.assert_index_equal(the_int, expected)
assert isinstance(the_int, DatetimeIndex)
assert the_int.offset == rng.offset
the_int = rng1.intersection(rng2.view(DatetimeIndex))
tm.assert_index_equal(the_int, expected)
# non-overlapping
the_int = rng[:10].intersection(rng[10:])
expected = DatetimeIndex([])
tm.assert_index_equal(the_int, expected)
def test_intersection_bug(self):
# GH #771
a = bdate_range('11/30/2011', '12/31/2011')
b = bdate_range('12/10/2011', '12/20/2011')
result = a.intersection(b)
tm.assert_index_equal(result, b)
def test_month_range_union_tz_pytz(self):
from pytz import timezone
tz = timezone('US/Eastern')
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz,
freq=MonthEnd())
late_dr = date_range(start=late_start, end=late_end, tz=tz,
freq=MonthEnd())
early_dr.union(late_dr)
def test_month_range_union_tz_dateutil(self):
tm._skip_if_windows_python_3()
from pandas._libs.tslibs.timezones import dateutil_gettz
tz = dateutil_gettz('US/Eastern')
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz,
freq=MonthEnd())
late_dr = date_range(start=late_start, end=late_end, tz=tz,
freq=MonthEnd())
early_dr.union(late_dr)
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = cdate_range(START, END)
def test_union(self):
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_union = left.union(right)
assert isinstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_union = left.union(right)
assert isinstance(the_union, Index)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_union = left.union(right)
assert isinstance(the_union, DatetimeIndex)
# order does not matter
tm.assert_index_equal(right.union(left), the_union)
# overlapping, but different offset
rng = date_range(START, END, freq=BMonthEnd())
the_union = self.rng.union(rng)
assert isinstance(the_union, DatetimeIndex)
def test_outer_join(self):
# should just behave as union
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
assert isinstance(the_join, DatetimeIndex)
# overlapping, but different offset
rng = date_range(START, END, freq=BMonthEnd())
the_join = self.rng.join(rng, how='outer')
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
def test_intersection_bug(self):
# GH #771
a = cdate_range('11/30/2011', '12/31/2011')
b = cdate_range('12/10/2011', '12/20/2011')
result = a.intersection(b)
tm.assert_index_equal(result, b)
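# Hedged sketch (not an upstream test): the set operations exercised above,
# in miniature; the dates are illustrative assumptions.
def _setops_sketch():
    left = date_range('2016-01-01', periods=5, freq='D')
    right = date_range('2016-01-03', periods=5, freq='D')
    # union gives the sorted combination, intersection the overlapping dates
    return left.union(right), left.intersection(right)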
| bsd-3-clause |
akloster/bokeh | bokeh/compat/mplexporter/renderers/vega_renderer.py | 54 | 5284 | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
if notebook=True, then return an object which will display in a notebook
otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| bsd-3-clause |
nixingyang/Kaggle-Face-Verification | Allstate Claims Severity/solution_LightGBM.py | 1 | 6216 | import os
import glob
import numpy as np
import pandas as pd
from pylightgbm.models import GBMRegressor
from sklearn.model_selection import ShuffleSplit
os.environ["LIGHTGBM_EXEC"] = "/opt/LightGBM/lightgbm"
# Data Set
DATASET_FOLDER_PATH = "./"
INPUT_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "input")
TRAIN_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "train.csv")
TEST_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "test.csv")
SUBMISSION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "submission")
ID_COLUMN_NAME = "id"
LABEL_COLUMN_NAME = "loss"
# Training Procedure
CROSS_VALIDATION_NUM = 10
NUM_ITERATIONS = 1000000
EARLY_STOPPING_ROUND = 200
def load_data():
# Read file content
train_file_content = pd.read_csv(TRAIN_FILE_PATH)
test_file_content = pd.read_csv(TEST_FILE_PATH)
combined_file_content = pd.concat([train_file_content, test_file_content])
del(train_file_content, test_file_content)
train_data_mask = combined_file_content[LABEL_COLUMN_NAME].notnull().as_matrix()
test_data_mask = combined_file_content[LABEL_COLUMN_NAME].isnull().as_matrix()
# Seperate the feature columns
feature_column_list = list(combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME], axis=1))
categorical_feature_column_list = [feature_column for feature_column in feature_column_list if feature_column.startswith("cat")]
# Process categorical features: remove obsolete unique values and factorize the values
for categorical_feature_column in categorical_feature_column_list:
unique_train_data_array = combined_file_content[categorical_feature_column][train_data_mask].unique()
unique_test_data_array = combined_file_content[categorical_feature_column][test_data_mask].unique()
unique_data_array_to_discard = np.setdiff1d(np.union1d(unique_train_data_array, unique_test_data_array),
np.intersect1d(unique_train_data_array, unique_test_data_array))
if len(unique_data_array_to_discard) > 0:
discard_function = lambda input_value: np.nan if input_value in unique_data_array_to_discard else input_value
combined_file_content[categorical_feature_column] = combined_file_content[categorical_feature_column].apply(discard_function)
combined_file_content[categorical_feature_column], _ = pd.factorize(combined_file_content[categorical_feature_column])
combined_file_content[categorical_feature_column] -= np.min(combined_file_content[categorical_feature_column])
# Separate the training and testing data set
X_array = combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME], axis=1).as_matrix()
Y_array = combined_file_content[LABEL_COLUMN_NAME].as_matrix()
ID_array = combined_file_content[ID_COLUMN_NAME].as_matrix()
X_train = X_array[train_data_mask]
Y_train = Y_array[train_data_mask]
X_test = X_array[test_data_mask]
ID_test = ID_array[test_data_mask]
submission_file_content = pd.DataFrame({ID_COLUMN_NAME:ID_test, LABEL_COLUMN_NAME:np.zeros(ID_test.shape[0])})
return X_train, Y_train, X_test, submission_file_content
def ensemble_predictions():
def _ensemble_predictions(ensemble_func, ensemble_submission_file_name):
ensemble_proba = ensemble_func(proba_array, axis=0)
ensemble_submission_file_content.loc[:, proba_columns] = ensemble_proba
ensemble_submission_file_content.to_csv(os.path.join(SUBMISSION_FOLDER_PATH, ensemble_submission_file_name), index=False)
# Read predictions
submission_file_path_list = glob.glob(os.path.join(SUBMISSION_FOLDER_PATH, "submission_*.csv"))
submission_file_content_list = [pd.read_csv(submission_file_path) for submission_file_path in submission_file_path_list]
ensemble_submission_file_content = submission_file_content_list[0]
# Concatenate predictions
proba_columns = list(set(ensemble_submission_file_content) - {ID_COLUMN_NAME})
proba_list = [np.expand_dims(submission_file_content.as_matrix(proba_columns), axis=0)
for submission_file_content in submission_file_content_list]
proba_array = np.vstack(proba_list)
# Ensemble predictions
for ensemble_func, ensemble_submission_file_name in \
zip([np.max, np.min, np.mean, np.median], ["max.csv", "min.csv", "mean.csv", "median.csv"]):
_ensemble_predictions(ensemble_func, ensemble_submission_file_name)
def run():
# Load data set
X_train, Y_train, X_test, submission_file_content = load_data()
Y_train = np.log(Y_train + 200)
# Cross validation
cross_validation_iterator = ShuffleSplit(n_splits=CROSS_VALIDATION_NUM, test_size=0.1, random_state=0)
for cross_validation_index, (train_index, valid_index) in enumerate(cross_validation_iterator.split(X_train), start=1):
print("Working on {}/{} ...".format(cross_validation_index, CROSS_VALIDATION_NUM))
submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "submission_{}.csv".format(cross_validation_index))
if os.path.isfile(submission_file_path):
continue
model = GBMRegressor(
learning_rate=0.01,
num_iterations=NUM_ITERATIONS,
num_leaves=200,
min_data_in_leaf=10,
feature_fraction=0.3,
feature_fraction_seed=cross_validation_index,
bagging_fraction=0.8,
bagging_freq=10,
bagging_seed=cross_validation_index,
metric="l1",
metric_freq=10,
early_stopping_round=EARLY_STOPPING_ROUND,
num_threads=-1)
model.fit(X_train[train_index], Y_train[train_index], test_data=[(X_train[valid_index], Y_train[valid_index])])
# Perform the testing procedure
Y_test = model.predict(X_test)
# Save submission to disk
if not os.path.isdir(SUBMISSION_FOLDER_PATH):
os.makedirs(SUBMISSION_FOLDER_PATH)
submission_file_content[LABEL_COLUMN_NAME] = np.exp(Y_test) - 200
submission_file_content.to_csv(submission_file_path, index=False)
# Perform ensembling
ensemble_predictions()
print("All done!")
if __name__ == "__main__":
run()
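# Hedged sketch: the target transform used in run(); losses are shifted by
# 200 and logged before training, and predictions are mapped back with
# exp(pred) - 200. The sample losses are illustrative assumptions.
def _target_transform_example():
    raw_loss = np.array([100.0, 2500.0, 7000.0])
    encoded = np.log(raw_loss + 200)       # what the model is trained on
    decoded = np.exp(encoded) - 200        # what is written to the submission
    return np.allclose(raw_loss, decoded)  # True: the round trip is lossless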
| mit |
fivejjs/crosscat | crosscat/binary_creation/setup.py | 1 | 2348 | #
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Run the build process by running the command 'python setup.py build'
#
# If everything works well you should find a subdirectory in the build
# subdirectory that contains the files needed to run the application
import sys
#
import cx_Freeze
excludes = [
'FixTk',
'Tkconstants',
'Tkinter',
]
includes = [
'crosscat.tests.mutual_information_test_utils',
'crosscat.tests.timing_test_utils',
'crosscat.utils.data_utils',
'crosscat.utils.file_utils',
'crosscat.utils.inference_utils',
'crosscat.utils.convergence_test_utils',
'crosscat.LocalEngine',
'crosscat.HadoopEngine',
'crosscat.cython_code.State',
'crosscat.utils.xnet_utils',
'crosscat.utils.general_utils',
'crosscat.utils.sample_utils',
'numpy',
'sklearn.metrics',
'sklearn.utils.lgamma',
'scipy.special',
'scipy.sparse.csgraph._validation',
]
buildOptions = dict(
excludes = excludes,
includes = includes,
compressed = False,
)
executables = [
cx_Freeze.Executable("hadoop_line_processor.py", base = None)
]
cx_Freeze.setup(
name = "hadoop_line_processor",
version = "0.1",
description = "process arbitrary engine commands on hadoop",
executables = executables,
options = dict(build_exe = buildOptions))
| apache-2.0 |
gwulfs/zipline | tests/test_assets.py | 2 | 31042 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
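# Hedged sketch (not an upstream test): the dict-metadata pattern relied on by
# the finder tests below, reduced to a single retrieval; symbols are
# illustrative assumptions.
def _asset_finder_sketch():
    finder = AssetFinder()
    finder.consume_metadata({0: {'symbol': 'PLAY'}, 1: {'symbol': 'MSFT'}})
    return finder.retrieve_asset(0)  # an Equity with symbol 'PLAY'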
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'real_but_old')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that pd.NaT for knowledge_date uses the value of as_of_date
ad_contracts = finder.lookup_future_chain('AD', dt, pd.NaT)
self.assertEqual(len(ad_contracts), 2)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
# Check that pd.NaT for as_of_date gives the whole chain
ad_contracts = finder.lookup_future_chain('AD', pd.NaT, first_day)
self.assertEqual(len(ad_contracts), 4)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Assemble the expected result: rows are dates, columns are sids.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date=pd.Timestamp('2006-02-01', tz='UTC'))
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = pd.Timestamp('2006-02-01', tz='UTC')
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp(datetime(year=2005, month=2, day=1), tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
# the default chain.
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
| apache-2.0 |
equialgo/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
dseuss/stuff | imageprocessing/jpeg.py | 1 | 1990 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
import matplotlib.pyplot as pl
import numpy as np
import scipy.fftpack as fft
from tools.plot import imsshow, rgb2gray
THRESHMAT = np.array([[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 50, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 35, 55, 64, 81, 104, 113, 255],
[59, 64, 78, 87, 103, 255, 255, 255],
[72, 91, 95, 98, 112, 255, 255, 255]], dtype=np.uint8)
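# The table above resembles the standard JPEG luminance quantization matrix,
# with several high-frequency entries clipped to 255; scaling it down below
# makes the quantization milder than baseline JPEG.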
THRESHMAT = THRESHMAT // 8
def extract_blocks_2D(ary, bs):
# TODO Check if this is the right continuation mode
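# Pad on the bottom/right with edge values so both dimensions become exact
# multiples of the block size, then cut the padded image into a grid of
# bs-shaped blocks.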
padded = np.pad(ary, ((0, -ary.shape[0] % bs[0]), (0, -ary.shape[1] % bs[1])),
mode='edge')
splits = [xrange(bs[i], padded.shape[i], bs[i]) for i in (0, 1)]
return np.array([np.split(subimg, splits[1], axis=1)
for subimg in np.split(padded, splits[0])])
def blocks2img(blocks):
return np.vstack([np.hstack(row) for row in blocks])
def quantize(ary, thresh):
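# Floor each DCT coefficient down to an integer multiple of its per-frequency
# threshold; the integer division already floors, so the outer np.floor is
# redundant but harmless.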
res = thresh * np.floor(ary // thresh)
return res
if __name__ == '__main__':
img = rgb2gray(pl.imread('Lenna.png'))
img = (img * 255).astype(np.uint8)
pl.gray()
blocksize = (8, 8)
blocks = extract_blocks_2D(img, bs=blocksize)
blockshape = blocks.shape[:2]
blocks = blocks.reshape((-1, ) + blocksize)
compressed = np.array([quantize(fft.dct(b.astype(float), norm='ortho'), THRESHMAT) for b in blocks])
img_c = blocks2img(np.reshape([fft.idct(b.astype(float), norm='ortho') for b in compressed],
blockshape + blocksize))
pl.subplot(121)
pl.hist(np.ravel(blocks), bins=60)
pl.subplot(122)
pl.hist(np.ravel(compressed), bins=60)
pl.show()
imsshow((img, img_c))
| unlicense |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/misc/longshort.py | 6 | 1676 | """
Illustrate the rec array utility functions by loading prices from a
CSV file, computing the daily returns, appending the results to the
record arrays, and joining on date.
"""
import urllib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# grab the price data off yahoo
u1 = urllib.urlretrieve('http://ichart.finance.yahoo.com/table.csv?s=AAPL&d=9&e=14&f=2008&g=d&a=8&b=7&c=1984&ignore=.csv')
u2 = urllib.urlretrieve('http://ichart.finance.yahoo.com/table.csv?s=GOOG&d=9&e=14&f=2008&g=d&a=8&b=7&c=1984&ignore=.csv')
# load the CSV files into record arrays
r1 = mlab.csv2rec(file(u1[0]))
r2 = mlab.csv2rec(file(u2[0]))
# compute the daily returns and add these columns to the arrays
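# the daily return on day t is (p_t - p_{t-1}) / p_{t-1}, computed below with
# np.diff over the adjusted close prices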
gains1 = np.zeros_like(r1.adj_close)
gains2 = np.zeros_like(r2.adj_close)
gains1[1:] = np.diff(r1.adj_close)/r1.adj_close[:-1]
gains2[1:] = np.diff(r2.adj_close)/r2.adj_close[:-1]
r1 = mlab.rec_append_fields(r1, 'gains', gains1)
r2 = mlab.rec_append_fields(r2, 'gains', gains2)
# now join them by date; the default postfixes are 1 and 2. The
# default jointype is inner so it will do an intersection of dates and
# drop the dates in AAPL which occurred before GOOG started trading in
# 2004. r1 and r2 are reverse ordered by date since Yahoo returns
# most recent first in the CSV files, but rec_join will sort by key so
# r below will be properly sorted
r = mlab.rec_join('date', r1, r2)
# long appl, short goog
g = r.gains1-r.gains2
tr = (1+g).cumprod() # the total return
# plot the return
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r.date, tr)
ax.set_title('total return: long APPL, short GOOG')
ax.grid()
fig.autofmt_xdate()
plt.show()
| mit |
luo66/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
adrn/GaiaPairsFollowup | paper/figures/fit_isochrones.py | 1 | 7407 | """
Fit isochrones to the stars in the 3 highlighted pairs
"""
# Standard library
from collections import OrderedDict
from os import path
import os
# Third-party
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
import emcee
from tqdm import tqdm
# Package
from comoving_rv.log import logger
from comoving_rv.db import Session, Base, db_connect
from comoving_rv.db.model import (Run, Observation, TGASSource, SimbadInfo,
GroupToObservations, SpectralLineInfo,
SpectralLineMeasurement, RVMeasurement)
from isochrones import StarModel
# from isochrones.mist import MIST_Isochrone
# iso = MIST_Isochrone() # interpolation issues with MIST isochrones
from isochrones.dartmouth import Dartmouth_Isochrone
iso = Dartmouth_Isochrone()
def main():
# TODO: bad, hard-coded...
# base_path = '/Volumes/ProjectData/gaia-comoving-followup/'
base_path = '../../data/'
db_path = path.join(base_path, 'db.sqlite')
engine = db_connect(db_path)
session = Session()
chain_path = path.abspath('./isochrone_chains')
os.makedirs(chain_path, exist_ok=True)
# Check out the bottom of "Color-magnitude diagram.ipynb":
interesting_group_ids = [1500, 1229, 1515]
all_photometry = OrderedDict([
('1500-8455', OrderedDict([('J', (6.8379998, 0.021)),
('H', (6.4640002, 0.017000001)),
('K', (6.3369999, 0.017999999)),
('W1', (6.2950001, 0.093000002)),
('W2', (6.2490001, 0.026000001)),
('W3', (6.3330002, 0.015)),
('B', (9.5950003, 0.022)),
('V', (8.5120001, 0.014))])),
('1500-1804', OrderedDict([('J', (6.9039998, 0.041000001)),
('H', (6.8559999, 0.027000001)),
('K', (6.7989998, 0.017000001)),
('W1', (6.803, 0.064999998)),
('W2', (6.7600002, 0.018999999)),
('W3', (6.8270001, 0.016000001)),
('B', (7.4980001, 0.015)),
('V', (7.289, 0.011))])),
('1229-1366', OrderedDict([('J', (6.7290001, 0.024)),
('H', (6.2449999, 0.02)),
('K', (6.1529999, 0.023)),
('W1', (6.1799998, 0.096000001)),
('W2', (6.04, 0.035)),
('W3', (6.132, 0.016000001)),
('B', (9.5539999, 0.021)),
('V', (8.4619999, 0.014))])),
('1229-7470', OrderedDict([('J', (9.1709995, 0.024)),
('H', (8.7959995, 0.026000001)),
('K', (8.7299995, 0.022)),
('W1', (8.6669998, 0.023)),
('W2', (8.7189999, 0.02)),
('W3', (8.6680002, 0.025)),
('B', (11.428, 0.054000001)),
('V', (10.614, 0.039999999))])),
('1515-3584', OrderedDict([('J', (5.363999843597412, 0.024000000208616257)),
('H', (4.965000152587891, 0.035999998450279236)),
('K', (4.815999984741211, 0.032999999821186066)),
('W1', (4.758, 0.215)),
('W2', (4.565, 0.115)),
('W3', (4.771, 0.015)),
('B', (8.347999572753906, 0.01600000075995922)),
('V', (7.182000160217285, 0.009999999776482582))])),
('1515-1834', OrderedDict([('J', (8.855999946594238, 0.024000000208616257)),
('H', (8.29699993133545, 0.020999999716877937)),
('K', (8.178999900817871, 0.017999999225139618)),
('W1', (8.117, 0.022)),
('W2', (8.15, 0.019)),
('W3', (8.065, 0.02)),
('B', (12.309000015258789, 0.11999999731779099)),
('V', (11.069999694824219, 0.054999999701976776))]))
])
for k in all_photometry:
samples_file = path.join(chain_path, '{0}.hdf5'.format(k))
if path.exists(samples_file):
logger.info("skipping {0} - samples exist at {1}"
.format(k, samples_file))
continue
phot = all_photometry[k]
obs = session.query(Observation).filter(Observation.object == k).one()
plx = (obs.tgas_source.parallax, obs.tgas_source.parallax_error)
# fit an isochrone
model = StarModel(iso, use_emcee=True, parallax=plx, **phot)
model.set_bounds(mass=(0.01, 20),
feh=(-1, 1),
distance=(0, 300),
AV=(0, 1))
# initial conditions for emcee walkers
nwalkers = 128
p0 = []
m0, age0, feh0 = model.ic.random_points(nwalkers,
minmass=0.01, maxmass=10.,
minfeh=-1, maxfeh=1)
_, max_distance = model.bounds('distance')
_, max_AV = model.bounds('AV')
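# Initial distances are drawn log-uniformly between 1 and max_distance;
# extinctions A_V are drawn uniformly between 0 and max_AV.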
d0 = 10**(np.random.uniform(0,np.log10(max_distance),size=nwalkers))
AV0 = np.random.uniform(0, max_AV, size=nwalkers)
p0 += [m0]
p0 += [age0, feh0, d0, AV0]
p0 = np.array(p0).T
npars = p0.shape[1]
# run emcee
ninit = 256
nburn = 1024
niter = 4096
logger.debug('Running emcee - initial sampling...')
sampler = emcee.EnsembleSampler(nwalkers, npars, model.lnpost)
# pos, prob, state = sampler.run_mcmc(p0, ninit)
for pos, prob, state in tqdm(sampler.sample(p0, iterations=ninit),
total=ninit):
pass
# cull the weak walkers
best_ix = sampler.flatlnprobability.argmax()
best_p0 = (sampler.flatchain[best_ix][None] +
np.random.normal(0, 1E-5, size=(nwalkers, npars)))
sampler.reset()
logger.debug('burn-in...')
for pos, prob, state in tqdm(sampler.sample(best_p0, iterations=nburn),
total=nburn):
pass
# pos,_,_ = sampler.run_mcmc(best_p0, nburn)
sampler.reset()
logger.debug('sampling...')
# _ = sampler.run_mcmc(pos, niter)
for pos, prob, state in tqdm(sampler.sample(pos, iterations=niter),
total=niter):
pass
model._sampler = sampler
model._make_samples(0.08)
model.samples.to_hdf(samples_file, key='samples')
# np.save('isochrone_chains/chain.npy', sampler.chain)
logger.debug('...done and saved!')
if __name__ == '__main__':
import logging
logger.setLevel(logging.DEBUG)
main()
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/cluster/plot_face_ward_segmentation.py | 1 | 2458 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.cluster import AgglomerativeClustering
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import SkipTest
if sp_version < (0, 12):
    raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 "
                   "and thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| mit |
schets/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
dallascard/guac | core/dataset_scripts/mfc/create_label_for_mixed_dataset.py | 1 | 1212 | import os
import glob
from optparse import OptionParser
import pandas as pd
from ...util import dirs
from ...util import file_handling as fh
def main():
usage = "%prog project"
parser = OptionParser(usage=usage)
#parser.add_option('--keyword', dest='key', default=None,
# help='Keyword argument: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project = args[0]
dirs.make_base_dir(project)
files = glob.glob(os.path.join(dirs.data_raw_sentences_dir, '*.txt'))
files.sort()
print "Found %d file" % len(files)
names = [os.path.splitext(os.path.basename(f))[0] for f in files]
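# The first three characters of each file name are taken as the issue code
# (see the slicing below).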
codes = list(set([f[:3] for f in names ]))
n_codes = len(codes)
codes.sort()
code_index = dict(zip(codes, range(n_codes)))
df = pd.DataFrame(index=names)
file_labels = [code_index[f[:3]] for f in names]
df['issue'] = file_labels
output_filename = os.path.join(dirs.data_raw_labels_dir, 'issue.csv')
df.to_csv(output_filename)
if __name__ == '__main__':
main()
| apache-2.0 |
nchammas/spark | python/pyspark/sql/dataframe.py | 4 | 99864 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
When this is a string without specifying the ``mode``, it works as the mode is
specified.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise Exception("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str or :class:`Column`
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait to data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_AND_DISK_DESER`)
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
For performance reasons, this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required and, `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(faction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise ValueError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
        DataFrame.describe
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
        If n is None, return a single :class:`Row` (or ``None`` if the frame is empty).
        Otherwise, return a list of at most ``n`` :class:`Row`\\s.
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
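        Examples
        --------
        A minimal illustration (assuming the two-row ``df`` doctest fixture
        defined at the bottom of this module): ``union`` keeps duplicates, so
        unioning a frame with itself doubles the row count.
        >>> df.union(df).count()
        4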
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
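        Examples
        --------
        A small sketch (again assuming the module's two-row ``df`` doctest
        fixture); like :func:`union`, duplicates are preserved.
        >>> df.unionAll(df).count()
        4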
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
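        Examples
        --------
        A quick sketch (assuming the module's two-row ``df`` doctest fixture):
        ``intersect`` deduplicates, so intersecting a frame with itself simply
        yields its distinct rows.
        >>> df.intersect(df).count()
        2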
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
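        Examples
        --------
        A minimal sketch (assuming the module's two-row ``df`` doctest
        fixture): subtracting a frame from itself leaves nothing.
        >>> df.subtract(df).count()
        0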
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
            If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise ValueError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
        Values `to_replace` and `value` must have the same type and can only be numerics,
        booleans, or strings. `value` can be None. When replacing, the new value will be cast
        to the type of the existing column.
        For numeric replacements all values to be replaced should have unique
        floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
        presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
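        Examples
        --------
        A small sketch (assuming the module's two-row ``df`` doctest fixture
        with ages 2 and 5); with ``relativeError=0.0`` the exact extremes are
        returned.
        >>> df.approxQuantile("age", [0.0, 1.0], 0.0)
        [2.0, 5.0]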
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
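        Examples
        --------
        A minimal sanity check (assuming the module's ``df`` doctest fixture):
        a numeric column is perfectly correlated with itself.
        >>> df.corr("age", "age")
        1.0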
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
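        Examples
        --------
        A minimal sketch (assuming the module's ``df`` doctest fixture with
        ages 2 and 5): the covariance of a column with itself is its sample
        variance.
        >>> df.cov("age", "age")
        4.5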
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
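        Examples
        --------
        A small sketch (assuming the module's ``df`` doctest fixture): the
        result has one row per distinct value of `col1`.
        >>> df.crosstab("age", "name").count()
        2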
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
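        Examples
        --------
        A rough sketch (assuming the module's ``df`` doctest fixture); the
        result column is named ``<col>_freqItems``.
        >>> df.freqItems(["age"]).columns
        ['age_freqItems']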
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` that with new specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
        therefore return the same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
        This API can compare both :class:`DataFrame`\\s very quickly but can still return
        `False` on :class:`DataFrame`\\s that return the same results, for instance, from
        different plans. Such false-negative semantics can be useful, for example, when caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
harshhemani/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
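    # Tiles `n_imgs` single-channel images of shape (img_h, img_w) into one
    # mosaic of shape (grid[0]*img_h, grid[1]*img_w), filled row-major.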
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
        raise ValueError("Expected %d images for a %dx%d grid, got %d"
                         % (grid[0] * grid[1], grid[0], grid[1], n_imgs))
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
siddharthhparikh/INFM750-project | zipcode_Naam_hu_maari_rite_aapish.py | 1 | 7161 | def calculate_income_range(income):
"""
if income < 25000:
return 0
elif income < 50000:
return 1
elif income < 75000:
return 2
else:
return 3
"""
return int(income/10000)
import csv
import random
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import r2_score
from sklearn.cross_validation import train_test_split
import pandas as pd
#import numpy as np
data = {}
violation = {}
i=0
with open('datasets/data_boston-2.csv', 'r') as csvfile:
#with open('C:\Viral\Courses\INFM 750\Data\data_boston.csv', 'r') as csvfile:
csvfile.readline()
file = csv.reader(csvfile, delimiter=',')
for row in file:
if row[17] != '' and row[18] != '':
if not violation.has_key(row[5]):
violation[row[5]] = i
i=i+1
if data.has_key(row[12]):
data[row[12]][-1] = data[row[12]][-1] + 1
else:
data[row[12]] = [int(row[12]), float(row[17]), float(row[18]), float(row[21]), float(row[20]), 1]
##normalizing the volume of violations with the population of zipcodes
dat = {}
for key, value in data.iteritems():
if value[-1] > 2000:
dat[key] = data[key]
dat[key][-1] = value[-1]/value[-2]
j=0
score = 0
err = 0
data_list = []
for key, value in dat.iteritems():
data_list.append(value)
from sklearn.linear_model import Ridge
#print data_list
import math
def r2score(y_actual, y_pred):
tss = 0
mse = 0
for a,b in zip(y_actual[0], y_pred):
mse += (a-b[0])*(a-b[0])
mse = mse/len(y_pred)
#print "len(y_actual[0]) = ", len(y_actual[0])
avg = 0
for a in y_actual[0]:
avg += a
avg = avg/len(y_actual[0])
#print "avg = ", avg
for a in y_actual[0]:
#print a
tss += (a-avg)*(a-avg)
r2 = 1-(len(y_actual[0])*mse/tss)
return r2
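# Illustrative check (added; not part of the original script): a perfect
# prediction gives r2score == 1.0, e.g.
#   r2score(pd.DataFrame({0: [1.0, 2.0, 3.0]}), [[1.0], [2.0], [3.0]])  # -> 1.0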
while j<1000:
test_data_list = []
test_data_label = []
train_data_list = []
train_data_label = []
i=0
random.shuffle(data_list);
df_data_list = pd.DataFrame(data_list)
df_data_list.columns = ['zipcode','medianincome','collegedegree','houseowner','population','volumeofviolations']
##removing population column from the df
df_data_list = df_data_list.drop('population', 1)
for col in df_data_list:
if col != 'zipcode':
df_data_list[col] = (df_data_list[col] - df_data_list[col].mean())/df_data_list[col].std(ddof=0)
##create test and train df
train, test = train_test_split(df_data_list, test_size = 0.2)
# print test
train_data_label = train[['volumeofviolations']]
train_data_list = train[['medianincome','collegedegree','houseowner']]
test_data_label = test[['volumeofviolations']]
test_data_list = test[['medianincome','collegedegree','houseowner']]
# print train
regr = linear_model.LinearRegression()
regr.fit(train_data_list, train_data_label)
err = err + r2_score(test_data_label, regr.predict(test_data_list));
# Explained variance score: 1 is perfect prediction
score = score + regr.score(test_data_list, test_data_label)
# for value in data_list:
# if i>20:
# test_data_list.append(value[:-2])
# test_data_label.append(value[-1])
# else:
# train_data_list.append(value[:-2])
# train_data_label.append(value[-1])
# i=i+1
# df_test_data_list = pd.DataFrame(test_data_list)
## print df_test_data_list
# df_test_data_label = pd.DataFrame(test_data_label)
# df_train_data_list = pd.DataFrame(train_data_list)
# #print df_train_data
# df_train_data_label = pd.DataFrame(train_data_label)
# for col in df_test_data_list:
# df_test_data_list[col] = (df_test_data_list[col] - df_test_data_list[col].mean())/df_test_data_list[col].std(ddof=0)
# for col in df_test_data_label:
# df_test_data_label[col] = (df_test_data_label[col] - df_test_data_label[col].mean())/df_test_data_label[col].std(ddof=0)
# for col in df_train_data_list:
# df_train_data_list[col] = (df_train_data_list[col] - df_train_data_list[col].mean())/df_train_data_list[col].std(ddof=0)
# for col in df_train_data_label:
# df_train_data_label[col] = (df_train_data_label[col] - df_train_data_label[col].mean())/df_train_data_label[col].std(ddof=0)
##polynomial regression
# poly = PolynomialFeatures(degree=2)
# df_train_data_list_poly = poly.fit_transform(df_train_data_list)
# df_test_data_list_poly = poly.fit_transform(df_test_data_list)
#
for value in data_list:
if i>18:
test_data_list.append(value[:-2])
test_data_label.append(value[-1])
else:
train_data_list.append(value[:-2])
train_data_label.append(value[-1])
i=i+1
df_test_data_list = pd.DataFrame(test_data_list)
# print df_test_data_list
df_test_data_label = pd.DataFrame(test_data_label)
df_train_data_list = pd.DataFrame(train_data_list)
#print df_train_data
df_train_data_label = pd.DataFrame(train_data_label)
for col in df_test_data_list:
df_test_data_list[col] = (df_test_data_list[col] - df_test_data_list[col].mean())/df_test_data_list[col].std(ddof=0)
for col in df_test_data_label:
df_test_data_label[col] = (df_test_data_label[col] - df_test_data_label[col].mean())/df_test_data_label[col].std(ddof=0)
for col in df_train_data_list:
df_train_data_list[col] = (df_train_data_list[col] - df_train_data_list[col].mean())/df_train_data_list[col].std(ddof=0)
for col in df_train_data_label:
df_train_data_label[col] = (df_train_data_label[col] - df_train_data_label[col].mean())/df_train_data_label[col].std(ddof=0)
# print df_test_data_list
# print df_test_data_label
# print df_train_data_list
# print df_train_data_label
# test_data = pd.Dataframe.from_dict(test_data_list,orient='index')
# test_label = pd.Dataframe.from_dict(test_data_label,orient='index')
#print "test :"; print test_data_list; print "train : "; print train_data_list;
# regr = linear_model.LinearRegression()
# regr.fit(df_train_data_list, df_train_data_label)
# err = err + r2_score(df_test_data_label, regr.predict(df_test_data_list)); #print r2_score(test_data_label, regr.predict(test_data_list))
# # Explained variance score: 1 is perfect prediction
# score = score + regr.score(df_test_data_list, df_test_data_label)
regr = linear_model.LinearRegression()
regr.fit(df_train_data_list, df_train_data_label)
y_pred = regr.predict(df_test_data_list)
y_actual = df_test_data_label
#print len(y_pred),len(y_actual)
err = err + r2score(df_test_data_label, regr.predict(df_test_data_list)); #print r2_score(test_data_label, regr.predict(test_data_list))
# Explained variance score: 1 is perfect prediction
score = score + regr.score(df_test_data_list, df_test_data_label)
j=j+1
print err/j
| apache-2.0 |
huobaowangxi/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
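# Added convenience runner (sketch, not part of the original test module):
# executing the file directly exercises both checks defined above.
if __name__ == "__main__":
    test_check_estimator()
    test_check_estimators_unfitted()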
| bsd-3-clause |
hanase/synthpop | synthpop/recipes/starter.py | 1 | 8774 | from .. import categorizer as cat
from ..census_helpers import Census
import pandas as pd
# TODO DOCSTRINGS!!
class Starter:
"""
This is a recipe for getting the marginals and joint distributions to use
to pass to the synthesizer using simple categories - population, age,
race, and sex for people, and children, income, cars, and workers for
households. This module is responsible for
Parameters
----------
c : object
census_helpers.Census object
state : string
FIPS code the state
county : string
FIPS code for the county
tract : string, optional
FIPS code for a specific track or None for all tracts in the county
Returns
-------
household_marginals : DataFrame
Marginals per block group for the household data (from ACS)
person_marginals : DataFrame
Marginals per block group for the person data (from ACS)
household_jointdist : DataFrame
joint distributions for the households (from PUMS), one joint
distribution for each PUMA (one row per PUMA)
person_jointdist : DataFrame
joint distributions for the persons (from PUMS), one joint
distribution for each PUMA (one row per PUMA)
tract_to_puma_map : dictionary
keys are tract ids and pumas are puma ids
"""
def __init__(self, key, state, county, tract=None):
self.c = c = Census(key)
self.state = state
self.county = county
self.tract = tract
income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]
vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]
workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]
families_columns = ['B11001_001E', 'B11001_002E']
block_group_columns = income_columns + families_columns
tract_columns = vehicle_columns + workers_columns
h_acs = c.block_group_and_tract_query(block_group_columns,
tract_columns, state, county,
merge_columns=['tract', 'county',
'state'],
block_group_size_attr="B11001_001E",
tract_size_attr="B08201_001E",
tract=tract)
self.h_acs_cat = cat.categorize(h_acs, {
("children", "yes"): "B11001_002E",
("children", "no"): "B11001_001E - B11001_002E",
("income", "lt35"): "B19001_002E + B19001_003E + B19001_004E + "
"B19001_005E + B19001_006E + B19001_007E",
("income", "gt35-lt100"): "B19001_008E + B19001_009E + "
"B19001_010E + B19001_011E + B19001_012E"
"+ B19001_013E",
("income", "gt100"): "B19001_014E + B19001_015E + B19001_016E"
"+ B19001_017E",
("cars", "none"): "B08201_002E",
("cars", "one"): "B08201_003E",
("cars", "two or more"): "B08201_004E + B08201_005E + B08201_006E",
("workers", "none"): "B08202_002E",
("workers", "one"): "B08202_003E",
("workers", "two or more"): "B08202_004E + B08202_005E"
}, index_cols=['state', 'county', 'tract', 'block group'])
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
p_acs = c.block_group_query(all_columns, state, county, tract=tract)
self.p_acs_cat = cat.categorize(p_acs, {
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['state', 'county', 'tract', 'block group'])
def get_geography_name(self):
# this synthesis is at the block group level for most variables
return "block_group"
def get_num_geographies(self):
return len(self.p_acs_cat)
def get_available_geography_ids(self):
# return the ids of the geographies, in this case a state, county,
# tract, block_group id tuple
for tup in self.p_acs_cat.index:
yield pd.Series(tup, index=self.p_acs_cat.index.names)
def get_household_marginal_for_geography(self, ind):
return self.h_acs_cat.loc[tuple(ind.values)]
def get_person_marginal_for_geography(self, ind):
return self.p_acs_cat.loc[tuple(ind.values)]
def get_household_joint_dist_for_geography(self, ind):
c = self.c
puma = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
h_pums = self.c.download_household_pums(ind.state, puma)
def cars_cat(r):
if r.VEH == 0:
return "none"
elif r.VEH == 1:
return "one"
return "two or more"
def children_cat(r):
if r.NOC > 0:
return "yes"
return "no"
def income_cat(r):
if r.FINCP > 100000:
return "gt100"
elif r.FINCP > 35000:
return "gt35-lt100"
return "lt35"
def workers_cat(r):
if r.WIF == 3:
return "two or more"
elif r.WIF == 2:
return "two or more"
elif r.WIF == 1:
return "one"
return "none"
h_pums, jd_households = cat.joint_distribution(
h_pums,
cat.category_combinations(self.h_acs_cat.columns),
{"cars": cars_cat, "children": children_cat,
"income": income_cat, "workers": workers_cat}
)
return h_pums, jd_households
def get_person_joint_dist_for_geography(self, ind):
c = self.c
puma = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
p_pums = self.c.download_population_pums(ind.state, puma)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
p_pums, jd_persons = cat.joint_distribution(
p_pums,
cat.category_combinations(self.p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
return p_pums, jd_persons
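# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module).  The
# Census API key and the state/county FIPS codes below are placeholders and
# must be replaced with real values for the queries in __init__ to succeed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    starter = Starter("MY_CENSUS_API_KEY", "06", "075")  # assumed: San Francisco, CA
    print(starter.get_geography_name())
    print(starter.get_num_geographies())
    for geog_id in starter.get_available_geography_ids():
        print(starter.get_household_marginal_for_geography(geog_id))
        break  # just show the first block group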
| bsd-3-clause |
ContinuumIO/chaco | chaco/color_mapper.py | 1 | 15082 | """ Defines the ColorMapper and ColorMapTemplate classes.
"""
# Major library imports
from types import IntType, FloatType
from numpy import arange, array, asarray, clip, divide, float32, int8, isinf, \
isnan, ones, searchsorted, sometrue, sort, take, uint8, where, zeros, \
linspace, ones_like
# Enthought library imports
from traits.api import Any, Array, Bool, Dict, Event, Float, HasTraits, \
Int, Property, Str, Trait
# Relative imports
from abstract_colormap import AbstractColormap
from data_range_1d import DataRange1D
from speedups import map_colors
class ColorMapTemplate(HasTraits):
"""
A class representing the state of a ColorMapper, for use when persisting
plots.
"""
# The segment data of the color map.
segment_map = Any
# The number of steps in the color map.
steps = Int(256)
# Low end of the color map range.
range_low_setting = Trait('auto', 'auto', Float)
# High end of the color map range.
range_high_setting = Trait('auto', 'auto', Float)
def __init__(self, colormap=None, **kwtraits):
"""
Creates this template from a color map instance or creates an empty
template.
"""
if colormap:
self.from_colormap(colormap)
return
def from_colormap(self, colormap):
""" Populates this template from a color map.
"""
self.segment_map = colormap._segmentdata.copy()
self.steps = colormap.steps
self.range_low_setting = colormap.range.low_setting
self.range_high_setting = colormap.range.high_setting
return
def to_colormap(self, range=None):
""" Returns a ColorMapper instance from this template.
"""
colormap = ColorMapper(self.segment_map, steps = self.steps)
if range:
colormap.range = range
else:
colormap.range = DataRange1D(low = self.range_low_setting,
high = self.range_high_setting)
return colormap
class ColorMapper(AbstractColormap):
""" Represents a simple band-of-colors style of color map.
The look-up transfer function is a simple linear function between defined
intensities. There is no limit to the number of steps that can be
defined. If the segment intervals contain very few array
locations, quantization errors will occur.
Construction of a ColorMapper can be done through the factory methods
from_palette_array() and from_segment_map(). Do not make direct calls to the
ColorMapper constructor.
"""
# The color table.
color_bands = Property(Array)
# The total number of color steps in the map.
steps = Int(256)
# The name of this color map.
name = Str
# Not used.
low_pos = None
# Not used.
high_pos = None
# A generic "update" event that generally means that anything that relies
# on this mapper for visual output should do a redraw or repaint.
updated = Event
# Are the mapping arrays out of date?
_dirty = Bool(True)
# The raw segment data for creating the mapping array.
_segmentdata = Dict # (Str, Tuple | List)
#------------------------------------------------------------------------
# Static methods.
#------------------------------------------------------------------------
@classmethod
def from_palette_array(cls, palette, **traits):
""" Creates a ColorMapper from a palette array.
The palette colors are linearly interpolated across the range of
mapped values.
The *palette* parameter is a Nx3 or Nx4 array of intensity values, where
N > 1::
[[R0, G0, B0], ... [R(N-1), G(N-1), B(N-1)]]
[[R0, G0, B0, A0], ... [R(N-1), G(N-1), B(N-1), A(N-1]]
"""
palette = asarray(palette)
n_colors, n_components = palette.shape
if n_colors < 2:
raise ValueError("Palette must contain at least two colors.")
if n_components not in (3,4):
raise ValueError("Palette must be of RGB or RGBA colors. "
"Got %s color components." % n_components)
# Compute the % offset for each of the color locations.
offsets = linspace(0.0, 1.0, n_colors)
# From the offsets and the color data, generate a segment map.
segment_map = {}
red_values = palette[:,0]
segment_map['red'] = zip(offsets, red_values, red_values)
green_values = palette[:,1]
segment_map['green'] = zip(offsets, green_values, green_values)
blue_values = palette[:,2]
segment_map['blue'] = zip(offsets, blue_values, blue_values)
if n_components == 3:
alpha_values = ones(n_colors)
else:
alpha_values = palette[:,3]
segment_map['alpha'] = zip(offsets, alpha_values, alpha_values)
return cls(segment_map, **traits)
@classmethod
def from_segment_map(cls, segment_map, **traits):
""" Creates a Colormapper from a segment map.
The *segment_map* parameter is a dictionary with 'red', 'green', and
'blue' (and optionally 'alpha') entries. Each entry is a list of
(x, y0, y1) tuples:
* x: an offset in [0..1] (offsets within the list must be in ascending order)
* y0: value for the color channel for values less than or equal to x
* y1: value for the color channel for values greater than x
When a data value gets mapped to a color, it will be normalized to be
within [0..1]. For each RGB(A) component, the two adjacent values will
be found in the segment_map. The mapped component value will be found by
linearly interpolating the two values.
Generally, y0==y1. Colormaps with sharp transitions will have y0!=y1 at
the transitions.
"""
if 'alpha' not in segment_map:
segment_map = segment_map.copy()
segment_map['alpha'] = [(0.0, 1.0, 1.0), (1.0, 1.0, 1.0)]
return cls(segment_map, **traits)
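    # Illustrative segment map (added sketch; the values are invented): a
    # sharp two-band map that is pure blue for normalized values <= 0.5 and
    # pure red above could be written as
    #
    #     segment_map = {
    #         'red':   [(0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)],
    #         'green': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
    #         'blue':  [(0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)],
    #     }
    #     cmap = ColorMapper.from_segment_map(segment_map)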
@classmethod
def from_file(cls, filename, **traits):
""" Creates a ColorMapper from a file.
The *filename* parameter is the name of a file whose lines each contain
4 or 5 float values between 0.0 and 1.0. The first value is an offset in
the range [0..1], and the remaining 3 or 4 values are red, green, blue,
and optionally alpha values for the color corresponding to that offset.
The first line is assumed to contain the name of the colormap.
"""
colormap_file = open(filename, 'r')
lines = colormap_file.readlines()
colormap_file.close()
rgba_arr = [[],[],[],[]]
for line in lines[1:]:
strvalues = line.strip().split()
values = [float32(value) for value in strvalues]
if len(values) > 4:
channels = (0,1,2,3)
else:
channels = (0,1,2)
for i in channels:
channeltuple = (values[0], values[i+1], values[i+1])
rgba_arr[i].append(channeltuple)
# Alpha is frequently unspecified.
if len(rgba_arr[-1]) == 0:
rgba_arr[-1] = [(0.0, 1.0, 1.0), (1.0, 1.0, 1.0)]
if 'name' not in traits:
# Don't override the code.
traits['name'] = lines[0].strip()
rgba_dict = {
'red': rgba_arr[0],
'green': rgba_arr[1],
'blue': rgba_arr[2],
'alpha': rgba_arr[3],
}
return cls(rgba_dict, **traits)
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, segmentdata, **kwtraits):
""" Creates a Colormapper from a segment map.
The *segment_map* parameter is a dictionary with 'red', 'green', and
'blue' (and optionally 'alpha') entries. Each entry is a list of
(x, y0, y1) tuples:
* x: an offset in [0..1] (offsets within the list must be in ascending order)
* y0: value for the color channel for values less than or equal to x
* y1: value for the color channel for values greater than x
When a data value gets mapped to a color, it will be normalized to be
within [0..1]. For each RGB(A) component, the two adjacent values will
be found in the segment_map. The mapped component value will be found by
linearly interpolating the two values.
Generally, y0==y1. Colormaps with sharp transitions will have y0!=y1 at
the transitions.
"""
self._segmentdata = segmentdata
super(ColorMapper, self).__init__(**kwtraits)
return
def map_screen(self, data_array):
""" Maps an array of data values to an array of colors.
"""
if self._dirty:
self._recalculate()
rgba = map_colors(data_array, self.steps, self.range.low,
self.range.high, self._red_lut, self._green_lut,
self._blue_lut, self._alpha_lut)
return rgba
def map_index(self, ary):
""" Maps an array of values to their corresponding color band index.
"""
if self._dirty:
self._recalculate()
indices = (ary - self.range.low) / (self.range.high - self.range.low) * self.steps
return clip(indices.astype(IntType), 0, self.steps - 1)
def reverse_colormap(self):
""" Reverses the color bands of this colormap.
"""
for name in ("red", "green", "blue", "alpha"):
data = asarray(self._segmentdata[name])
data[:, (1,2)] = data[:, (2,1)]
data[:,0] = (1.0 - data[:,0])
self._segmentdata[name] = data[::-1]
self._recalculate()
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _get_color_bands(self):
""" Gets the color bands array.
"""
if self._dirty:
self._recalculate()
luts = [self._red_lut, self._green_lut, self._blue_lut]
        if self.color_depth == 'rgba':
luts.append(self._alpha_lut)
result = zip(*luts)
return result
def _recalculate(self):
""" Recalculates the mapping arrays.
"""
self._red_lut = self._make_mapping_array(
self.steps, self._segmentdata['red']
)
self._green_lut = self._make_mapping_array(
self.steps, self._segmentdata['green']
)
self._blue_lut = self._make_mapping_array(
self.steps, self._segmentdata['blue']
)
self._alpha_lut = self._make_mapping_array(
self.steps, self._segmentdata['alpha']
)
self.updated = True
self._dirty = False
return
#### matplotlib ####
def _make_mapping_array(self, n, data):
"""Creates an N-element 1-D lookup table
The *data* parameter is a list of x,y0,y1 mapping correspondences (which
can be lists or tuples), where all the items are values between 0 and 1,
inclusive. The items in the mapping are:
* x: a value being mapped
* y0: the value of y for values of x less than or equal to the given x value.
* y1: the value of y for values of x greater than the given x value.
The two values of y allow for discontinuous mapping functions (for
example, as might be found in a sawtooth function)
The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where result[x*(N-1)]
gives the closest value for values of x between 0 and 1.
"""
try:
adata = array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
        if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if sometrue(sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (n-1)
lut = zeros((n,), float32)
xind = arange(float32(n), dtype=float32)
ind = searchsorted(x, xind)[1:-1]
lut[1:-1] = ( divide(xind[1:-1] - take(x,ind-1),
take(x,ind)-take(x,ind-1) )
*(take(y0,ind)-take(y1,ind-1)) + take(y1,ind-1))
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
lut = lut.clip(0, 1)
return lut
#### matplotlib ####
def _map(self, X):
""" Maps from a scalar or an array to an RGBA value or array.
The *X* parameter is either a scalar or an array (of any dimension).
If it is scalar, the function returns a tuple of RGBA values; otherwise
it returns an array with the new shape = oldshape+(4,). Any values
that are outside the 0,1 interval are clipped to that interval before
generating RGB values.
This is no longer used in this class. It has been deprecated and
retained for API compatibility.
"""
if type(X) in [IntType, FloatType]:
vtype = 'scalar'
xa = array([X])
else:
vtype = 'array'
xa = asarray(X)
# assume the data is properly normalized
#xa = where(xa>1.,1.,xa)
#xa = where(xa<0.,0.,xa)
nanmask = isnan(xa)
xa = where(nanmask, 0, (xa * (self.steps-1)).astype(int))
rgba = zeros(xa.shape+(4,), float)
rgba[...,0] = where(nanmask, 0, take(self._red_lut, xa))
rgba[...,1] = where(nanmask, 0, take(self._green_lut, xa))
rgba[...,2] = where(nanmask, 0, take(self._blue_lut, xa))
rgba[...,3] = where(nanmask, 0, take(self._alpha_lut, xa))
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def _range_changed(self, old, new):
if old is not None:
old.on_trait_change(self._range_change_handler, "updated",
remove = True)
if new is not None:
new.on_trait_change(self._range_change_handler, "updated")
self.updated = new
def _range_change_handler(self, obj, name, new):
"Handles the range changing; dynamically attached to our ranges"
self.updated = obj
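# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module).  It
# assumes a working chaco/traits environment; the palette and the data range
# below are invented for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _palette = array([[0.0, 0.0, 1.0],   # blue at the low end
                      [1.0, 0.0, 0.0]])  # red at the high end
    _cmap = ColorMapper.from_palette_array(
        _palette, range=DataRange1D(low=0.0, high=1.0))
    _rgba = _cmap.map_screen(linspace(0.0, 1.0, 5))
    print _rgba.shape  # expected (5, 4): one RGBA row per mapped value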
# EOF
| bsd-3-clause |
matthewpklein/battsimpy | tests/dae_phie_ce_test.py | 1 | 10270 | import numpy
import numpy.linalg
import scipy.integrate as sci_int
import scipy.linalg
from matplotlib import pyplot as plt
from assimulo.solvers import IDA
from assimulo.problem import Implicit_Problem
def mid_to_edge( var_mid, x_e ) :
var_edge = numpy.array( [var_mid[0]] + [ var_mid[i]*var_mid[i+1]/( ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i])))*var_mid[i+1] + (1- ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i]))))*var_mid[i] ) for i in range(len(var_mid)-1) ] + [var_mid[-1]] )
return var_edge
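# Note (added for clarity): mid_to_edge() maps cell-centred values onto cell
# edges with a distance-weighted harmonic-style average, leaving the two
# boundary values unchanged.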
def flux_mat_builder( N, x_m, vols, P ) :
A = numpy.zeros([N,N], dtype='d')
for i in range(1,N-1) :
A[i,i-1] = (1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1]) - (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[i,i+1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i ])
i=0
A[0,0] = -(1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[0,1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
i=N-1
A[i,i-1] = (1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
return A
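# Illustrative check (added; not part of the original script): on a uniform
# three-cell mesh with unit volumes and unit transport coefficients the
# assembled operator is the usual tridiagonal finite-volume stencil.
#   >>> x_m = numpy.array([0.5, 1.5, 2.5])
#   >>> flux_mat_builder(3, x_m, numpy.ones(3), numpy.ones(4))
#   array([[-1.,  1.,  0.],
#          [ 1., -2.,  1.],
#          [ 0.,  1., -1.]])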
class MyProblem( Implicit_Problem ) :
def __init__(self, N, Ac, y0, yd0, name ) :
Implicit_Problem.__init__(self,y0=y0,yd0=yd0,name=name)
self.Ac = Ac
self.T = 298.15
La = 65.0
Ls = 25.0
Lc = 55.0
Lt = (La+Ls+Lc)
X = Lt*1e-6 # [m]
Ns = int(N*(Ls/Lt))
Na = int(N*(La/Lt))
Nc = N - Ns - Na
print 'Na, Nc:', Na, Nc
self.N = N
self.X = X
self.x_e = numpy.linspace( 0.0, X, N+1 )
self.x_m = numpy.array( [ 0.5*(self.x_e[i+1]+self.x_e[i]) for i in range(N) ], dtype='d' )
self.vols = numpy.array( [ (self.x_e[i+1] - self.x_e[i]) for i in range(N)], dtype='d' )
### Diffusivity
self.La, self.Ls, self.Lc = La, Ls, Lc
self.Na, self.Ns, self.Nc = Na, Ns, Nc
eps_a = 0.25
eps_s = 0.5
eps_c = 0.2
ba, bs, bc = 1.2, 0.5, 0.5
eps_a_vec = [ eps_a for i in range(Na) ] # list( eps_a + eps_a/2.*numpy.sin(numpy.linspace(0.,Na/4,Na)) ) # list(eps_a + eps_a*numpy.random.randn(Na)/5.) #
eps_s_vec = [ eps_s for i in range(Ns) ]
eps_c_vec = [ eps_c for i in range(Nc) ] # list( eps_c + eps_c/2.*numpy.sin(numpy.linspace(0.,Nc/4,Nc)) ) # list(eps_c + eps_c*numpy.random.randn(Nc)/5.) #
self.eps_m = numpy.array( eps_a_vec + eps_s_vec + eps_c_vec, dtype='d' )
self.k_m = 1./self.eps_m
self.eps_mb = numpy.array( [ ea**ba for ea in eps_a_vec ] + [ es**bs for es in eps_s_vec ] + [ ec**bc for ec in eps_c_vec ], dtype='d' )
self.eps_eff = numpy.array( [ ea**(1.+ba) for ea in eps_a_vec ] + [ es**(1.+bs) for es in eps_s_vec ] + [ ec**(1.+bc) for ec in eps_c_vec ], dtype='d' )
self.K_m = numpy.diag( self.k_m )
self.pe0_ind = self.Na+self.Ns+self.Nc-3
t_plus = 0.4
F = 96485.0
self.t_plus = t_plus
self.F = F
self.R_gas = 8.314
Rp_c = 6.5e-6
Rp_a = 12.0e-6
as_c = 3.*numpy.array(eps_c_vec, dtype='d')/Rp_c
as_a = 3.*numpy.array(eps_a_vec, dtype='d')/Rp_a
self.as_c = as_c
self.as_a = as_a
self.as_a_mean = 1./self.La*sum( [ asa*v for asa,v in zip(as_a, self.vols[:Na]) ] )
self.as_c_mean = 1./self.Lc*sum( [ asc*v for asc,v in zip(as_c, self.vols[-Nc:]) ] )
print 'asa diff', self.as_a_mean - as_a[0]
print 'asc diff', self.as_c_mean - as_c[0]
Ba = [ (1.-t_plus)*asa/ea for ea, asa in zip(eps_a_vec,as_a) ]
Bs = [ 0.0 for i in range(Ns) ]
Bc = [ (1.-t_plus)*asc/ec for ec, asc in zip(eps_c_vec,as_c) ]
self.B_ce = numpy.diag( numpy.array(Ba+Bs+Bc, dtype='d') )
Bap = [ asa*F*v for asa,v in zip(as_a,self.vols[:Na]) ]
Bsp = [ 0.0 for i in range(Ns) ]
Bcp = [ asc*F*v for asc,v in zip(as_c,self.vols[-Nc:]) ]
# Bap = [ asa*F for asa in as_a ]
# Bsp = [ 0.0 for i in range(Ns) ]
# Bcp = [ asc*F for asc in as_c ]
self.B2_pe = numpy.diag( numpy.array(Bap+Bsp+Bcp, dtype='d') )
def set_j_vec( self, I_app ) :
i_app = I_app / self.Ac
j_in_a = i_app / ( self.La*self.as_a_mean*self.F )
j_in_c = -i_app / ( self.Lc*self.as_c_mean*self.F )
print 'i_app :', i_app
print 'j_in_a:', j_in_a
print 'j_in_c:', j_in_c
# Set the input j
ja = [ j_in_a for i in range(self.Na) ]
js = [ 0.0 for i in range(self.Ns) ]
jc = [ j_in_c for i in range(self.Nc) ]
self.j = numpy.array( ja + js + jc )
## Define c_e functions
def build_Ace_mat( self, c ) :
D_eff = self.Diff_ce( c )
A = self.K_m.dot( flux_mat_builder( self.N, self.x_m, self.vols, D_eff ) )
return A
def Diff_ce( self, c ) :
T = self.T
D_ce = 1e-4 * 10.0**( -4.43 - (54./(T-229.-5e-3*c)) - (0.22e-3*c) ) ## Torchio (LIONSIMBA) ECS paper
#1e-10*numpy.ones_like(c)#
D_mid = D_ce * self.eps_eff
if type(c) == float :
D_edge = D_mid
else :
D_edge = mid_to_edge( D_mid, self.x_e )
return D_edge
## Define phi_e functions
def build_Ape_mat( self, c ) :
k_eff = self.kapp_ce( c )
A = flux_mat_builder( self.N, self.x_m, numpy.ones_like(self.vols), k_eff )
A[-1,-1] = 2*A[-1,-1]
return A
def build_Bpe_mat( self, c ) :
gam = 2.*(1.-self.t_plus)*self.R_gas / self.F
k_eff = self.kapp_ce( c ) #0.1*numpy.ones_like(c)#
c_edge = mid_to_edge( c, self.x_e )
B1 = flux_mat_builder( self.N, self.x_m, numpy.ones_like(self.vols), k_eff*self.T*gam/c_edge )
return B1
def kapp_ce( self, c ) :
T = self.T
k_ce = 1e-4 * c *( -10.5 +0.668e-3*c + 0.494e-6*c**2
+ (0.074 - 1.78*1e-5*c - 8.86e-10*c**2)*T
+ (-6.96e-5 + 2.8e-8*c)*T**2 )**2 ## Torchio (LIONSIMBA) ECS paper
k_mid = k_ce * self.eps_eff
if type(c) == float :
k_edge = k_mid
else :
k_edge = mid_to_edge( k_mid, self.x_e )
return k_edge
## Define system equations
def res( self, t, y, yd ) :
ce = y[ :N]
c_dots = yd[:N]
phi = y[N:]
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
r1 = c_dots - ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(self.j)).flatten()) )
r2 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(self.j).flatten()
res_out = numpy.concatenate( [r1, r2] )
return res_out
def jac( self, c, t, y, yd ) :
ce = y[ :N]
c_dots = yd[:N]
phi = y[N:]
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
A_dots = numpy.diag( [1*c for i in range(self.N)] )
j_c = A_dots - A_ce
j = scipy.linalg.block_diag( j_c, A_pe )
j[self.N:,:self.N] = -B_pe
return j
### Mesh
N = 80
#X = 165e-6 # [m]
cell_coated_area = 1.0 # [m^2]
I_app = 200.0 #300.0 # A
### Initial conditions
c_init = 1000.0 # [mol/m^3]
c_centered = c_init*numpy.ones( N, dtype='d' )#numpy.linspace(1500, 500, N) #
p_init = 0.0 # [V]
p_centered = p_init*numpy.ones( N, dtype='d' )
#The initial conditons
y0 = numpy.concatenate( [c_centered, p_centered] ) #Initial conditions
yd0 = [0.0 for i in range(2*N)] #Initial conditions
#Create an Assimulo implicit problem
imp_mod = MyProblem(N,cell_coated_area,y0,yd0,'Example using an analytic Jacobian')
#Sets the options to the problem
#imp_mod.jac = jac #Sets the jacobian
imp_mod.algvar = [1.0 for i in range(N)] + [0.0 for i in range(N)] #Set the algebraic components
#Create an Assimulo implicit solver (IDA)
imp_sim = IDA(imp_mod) #Create a IDA solver
#Sets the paramters
imp_sim.atol = 1e-5 #Default 1e-6
imp_sim.rtol = 1e-5 #Default 1e-6
imp_sim.suppress_alg = True #Suppres the algebraic variables on the error test
#Simulate
#res_test = imp_mod.res( 0.0, y0, yd0 )
#jac_test = imp_mod.jac( 2, 0.0, y0, yd0 )
#Let Sundials find consistent initial conditions by use of 'IDA_YA_YDP_INIT'
imp_mod.set_j_vec( I_app )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
# Sim step 1
t1, y1, yd1 = imp_sim.simulate(10,100)
#imp_mod.set_j_vec( I_app/10. )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
## Sim step 1
#ta, ya, yda = imp_sim.simulate(10.05,100)
#imp_mod.set_j_vec( I_app/100. )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
## Sim step 1
#tb, yb, ydb = imp_sim.simulate(10.1,100)
imp_mod.set_j_vec( 0.0 )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
# Sim step 1
t2, y2, yd2 = imp_sim.simulate(1100,100)
c_avg_0 = numpy.mean( imp_mod.eps_m*y0[:N] )
c_avg_f = numpy.mean( imp_mod.eps_m*y2[-1,:N] )
print c_avg_0
print c_avg_f
#Plot
# Plot through space
f, ax = plt.subplots(2,2)
ax[0,0].plot(imp_mod.x_m*1e6,y1.T[:N]) #Plot the solution
ax[0,1].plot(imp_mod.x_m*1e6,y1.T[N:]) #Plot the solution
ax[0,0].set_title('t1 c')
ax[0,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0,1].set_title('t1 p')
ax[0,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,1].set_ylabel('E-lyte Potential [V]')
ax[1,0].plot(imp_mod.x_m*1e6,y2.T[:N]) #Plot the solution
ax[1,1].plot(imp_mod.x_m*1e6,y2.T[N:]) #Plot the solution
ax[1,0].set_title('t2 c')
ax[1,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[1,1].set_title('t2 p')
ax[1,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,1].set_ylabel('E-lyte Potential [V]')
plt.tight_layout()
# Plot through time
f, ax = plt.subplots(1,2)
ax[0].plot(t1,y1[:,:N]) #Plot the solution
ax[1].plot(t1,y1[:,N:]) #Plot the solution
ax[0].plot(t2,y2[:,:N]) #Plot the solution
ax[1].plot(t2,y2[:,N:]) #Plot the solution
ax[0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0].set_xlabel('Time [s]')
ax[1].set_ylabel('E-lyte Potential [V]')
ax[1].set_xlabel('Time [s]')
plt.tight_layout()
plt.show()
| gpl-3.0 |
Hugo1991/TFM | genetic_clustering_kmeans.py | 2 | 42285 | import networkx as nx
import random as rnd
import Tkinter
from Tkinter import *
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing as mp
from node_values import node_values
from numpy import sqrt
from enum import Enum
import findspark
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
findspark.init("/media/hugo/Datos/Linux/spark/")
from pyspark import SparkContext, SparkConf, Row
from pyspark.mllib.linalg import DenseVector
class execution_type(Enum):
static_part1 = 0
static_part2 = 1
static = 2
dynamic = 3
class weight_edges(Enum):
linear = 0
variable = 1
class genetic_graph:
def __init__(self, G,sc, configuration=weight_edges.variable):
self.G = G
self.configuration = configuration
self.createNodes()
self.createEdges()
operations = graph_operations(self.G)
operations.computeClusters(sc)
nx.draw(self.G)
plt.show()
def createNodes(self):
nx.set_node_attributes(self.G, 'concept', None)
rnd.seed()
value = 0
for i in self.G.nodes_iter():
self.G.node[i]['id'] = value
self.G.node[i]['concept'] = rnd.randint(0, 9)
value = value + 1
def createEdges(self):
value = 0
for i in self.G.edges_iter():
self.G.edge[i[0]][i[1]]['id'] = value
if self.configuration == weight_edges.variable:
self.G.edge[i[0]][i[1]]['weight'] = 1
value = value + 1
class graph_operations:
def __init__(self, G, option=execution_type.dynamic):
self.nodes = []
self.G = G
self.option = option
self.createHubs()
def createHubs(self):
self.hub_vertexes = []
self.non_hub_vertexes = []
self.HVSs = []
self.clusters = []
self.num_percentage_vertexes = 20
self.num_hub_vertexes = int(self.G.number_of_nodes() * self.num_percentage_vertexes / 100.0)
self.hub_score = 1
self.no_hub_score = 0.5
def get_non_hub_vertexes(self):
return self.non_hub_vertexes
def get_HVSs(self):
return self.HVSs
def getSalienceRanking(self):
for i in self.G.nodes_iter():
new_salience = node_values(self.G.node[i]['id'], len(self.G.neighbors(i)))
self.nodes.append(new_salience)
self.nodes = sorted(self.nodes, key=lambda node_values: node_values.get_value())
return self.nodes
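    # Note (added for clarity): salience is simply the node degree and the
    # ranking is ascending, so initHVS() takes the hub vertices from the end
    # of this list and the non-hub vertices from the remainder.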
def computeClusters(self,sc):
        # Obtain the HVSs.
self.initHVS(sc)
self.generateHVSs(sc)
        # Merge the HVSs whose internal connectivity is lower than their connectivity with each other.
estimators = [('k_means_iris_8', KMeans(n_clusters=8)),
('k_means_iris_3', KMeans(n_clusters=3)),
('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,
init='random'))]
fignum = 1
titles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']
for name, est in estimators:
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
            X = np.array(self.HVSs.collect())
            est.fit(X)
            labels = est.labels_
            # Each HVS contributes a single feature here, so reuse that column
            # on every axis (the original column indices came from the
            # 4-feature iris example this block was adapted from).
            ax.scatter(X[:, 0], X[:, 0], X[:, 0],
c=labels.astype(np.float), edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
            ax.set_xlabel('HVS feature')
            ax.set_ylabel('HVS feature')
            ax.set_zlabel('HVS feature')
ax.set_title(titles[fignum - 1])
ax.dist = 12
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
ax.dist = 12
fig.show()
print("HVSs3", self.HVSs)
        # Assign the non-hub vertices to the clusters
non_hub = NonHubGenetic(self)
solution = non_hub.evolution(sc);
self.assignSolutiontoClusters(solution)
# self.assignNonHubToClusters()
print("Clusters:")
for i in range(len(self.clusters)):
print(self.clusters[i])
def assignSolutiontoConnectivity(self, solution):
for i in range(0, len(solution)):
connectivity = solution[i].get_value()[0]
if connectivity != -1:
new_connectivity = solution[i].get_value()[1]
position = solution[i].get_iden()
self.HVSs[new_connectivity].append(self.HVSs[position][connectivity])
self.HVSs[position].pop(connectivity)
i = 0
while i in range(0, len(self.HVSs)):
if len(self.HVSs[i]) == 0:
self.HVSs.pop(i)
else:
i = i + 1
def assignSolutiontoHVS(self, solution):
pops = []
for i in range(0, len(solution)):
connection = solution[i].get_value()
if connection != -1:
position = solution[i].get_iden()
self.HVSs[position].extend(self.HVSs[connection])
pops.append(connection)
for i in range(0, len(pops)):
self.HVSs.pop(i)
def assignSolutiontoClusters(self, solution):
for i in range(len(self.HVSs)):
self.clusters.append(self.HVSs[i])
for i in range(0, len(solution)):
chromosome = solution[i]
iden = chromosome.get_iden()
cluster = chromosome.get_value()
if cluster != -1:
self.clusters[cluster].append(iden)
def initHVS(self,sc):
        # Obtain the 'n' hub vertices and the 'N-n' non-hub vertices.
ranking = self.getSalienceRanking()
stop = len(ranking) - self.num_hub_vertexes - 2
for i in range(len(ranking) - 1, stop, -1):
self.hub_vertexes.append(ranking[i].get_iden())
self.hub_vertexes=sc.parallelize(self.hub_vertexes)
print("hubs:", self.hub_vertexes.collect())
start = len(ranking) - self.num_hub_vertexes - 2
for i in range(start, 0, -1):
self.non_hub_vertexes.append(ranking[i].get_iden())
self.non_hub_vertexes=sc.parallelize(self.non_hub_vertexes)
def generateHVSs(self,sc):
        # Initially, create one HVS per hub vertex.
self.HVSs= self.hub_vertexes.map(lambda x: [x])
def interconnectHVSs(self,sc):
        # For each hub vertex, check whether there is an HVS other than the
        # one it belongs to with which it is more strongly connected.
change = True
HVSs=self.HVSs.collect()
while (change):
change = False
i = 0
while (i < len(HVSs)):
vertexes = HVSs[i]
j = 0
while (j < len(vertexes)):
iden = vertexes[j]
intraconnection = self.getConnectionWithHVS(iden, HVSs[i])
interconnection = self.getMaxConnectionWithHVSs(iden, intraconnection)
                    if interconnection[0] != -1 and interconnection[1] != 0: # There is another HVS to which it is more strongly connected.
                        # Move the vertex to that HVS.
change = True
#HVSs.map(lambda x: x).collect()[i].pop(j)
HVSs[i].pop(j)
HVSs[interconnection[0]].append(iden)
print iden
else:
j = j + 1
if len(vertexes) == 0:
HVSs.pop(i)
self.HVSs = sc.parallelize(HVSs)
else:
i = i + 1
def similarityHVSs(self):
change = True
while (change):
change = False
pops = []
for i in range(len(self.HVSs)):
hvs1 = self.HVSs.collect()[i]
j = i
while (j < self.HVSs.count()):
hvs2 = self.HVSs.collect()[j]
intra_sim1 = self.getIntraSimilarity(hvs1)
intra_sim2 = self.getIntraSimilarity(hvs2)
inter_sim = self.getInterSimilarity(hvs1, hvs2)
if (inter_sim > intra_sim1 or inter_sim > intra_sim2):
                        # Merge both HVSs.
                        print ("merging HVSs")
self.HVSs.collect()[i].extend(hvs2)
pops.append(j)
change = True
j = j + 1
for i in pops:
                print("removing merged HVS")
self.HVSs.pop(i)
    # Function that returns the graph node with the given identifier.
def getNodeFromIden(self, iden):
result = None
for i in self.G.nodes_iter():
node = self.G.node[i]
if iden == node['id']:
result = node
break
return result
    # Function that returns the HVS with which a concept has the highest connectivity, if it exceeds its internal connectivity.
def getMaxConnectionWithHVSs(self, iden, intraconnection):
max_connection = 0.0
max_position = -1
result = []
result.append(-1)
result.append(-1)
for i in range(self.HVSs.count()):
connection = self.getConnectionWithHVS(iden, self.HVSs.collect()[i]);
if (connection > max_connection):
max_connection = connection
max_position = i
if (max_connection > intraconnection):
result[0] = max_position
result[1] = max_connection
else:
result[0] = -1;
result[1] = -1;
return result
    # Function that returns the connectivity of a concept with respect to an HVS.
def getConnectionWithHVS(self, iden, vertexes):
node = self.getNodeFromIden(iden)
neighbors = self.G.neighbors(node['id'])
connection = 0.0
for i in range(len(neighbors)):
neighbor_iden = neighbors[i]
if neighbor_iden in vertexes:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'], neighbor['id']):
edge_data = self.G.get_edge_data(node['id'], neighbor['id'])
connection = edge_data['weight']
break
return connection
    # Function that computes the similarity (connectivity) between the concepts of an HVS.
def getIntraSimilarity(self, vertexes):
similarity = 0.0;
for i in range(len(vertexes)):
iden = vertexes[i]
node = self.getNodeFromIden(iden)
neighbors = self.G.neighbors(node['id'])
for j in range(len(neighbors)):
neighbor_iden = neighbors[j]
if neighbor_iden in vertexes:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'], neighbor['id']):
edge_data = self.G.get_edge_data(node['id'], neighbor['id'])
weight = edge_data['weight']
similarity = similarity + weight
return similarity
    # Function that computes the similarity (connectivity) between two HVSs.
def getInterSimilarity(self, hvs1, hvs2):
similarity = 0.0;
for i in range(len(hvs1)):
iden = hvs1[i]
node = self.getNodeFromIden(iden)
neighbors = self.G.neighbors(node['id'])
for j in range(len(neighbors)):
neighbor_iden = neighbors[j]
if neighbor_iden in hvs2:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'], neighbor['id']):
edge_data = self.G.get_edge_data(node['id'], neighbor['id'])
weight = edge_data['weight']
similarity = similarity + weight
return similarity
    # Method that removes the HVSs with connectivity 1.
def extractNodesWithOneVertex(self):
i = 0
while (i < len(self.HVSs)):
vertexes = self.HVSs[i]
if len(vertexes) <= 1:
self.non_hub_vertexes.append(vertexes[0])
self.HVSs.remove(vertexes)
else:
i = i + 1
    # Given a node, returns the HVS (and hence the cluster) it most closely resembles.
def getMoreSimilarHVS(self, iden):
max_position = -1
max_similarity = 0.0
for i in range(len(self.HVSs)):
similarity = 0.0
vertexes = self.HVSs[i]
for j in range(len(vertexes)):
hv = vertexes[j]
hvnode = self.getNodeFromIden(hv)
node = self.getNodeFromIden(iden)
pos = self.find(node, hvnode)
if (pos != -1):
edge_data = self.G.get_edge_data(node['id'], self.G.node[pos]['id'])
weight = edge_data['weight']
similarity = similarity + weight
if (similarity > max_similarity):
max_position = i
max_similarity = similarity
return max_position
def find(self, node1, node2):
result = -1
processed = []
itr = nx.all_neighbors(self.G, node1['id'])
for i in itr:
if i not in processed:
processed.append(i)
if self.G.node[i]['concept'] == node2['concept']:
result = self.G.node[i]['id']
break
return result
class HVSConnectivityGenetic():
def __init__(self, graph_operations, limit=800, size=16, margin_crossover=0.6, prob_crossover=0.9,
margin_mutation=0.1, prob_mutation=0.4):
rnd.seed(0)
self.counter = 0
self.graph_operations = graph_operations
self.target = self.graph_operations.get_HVSs().count()
self.limit = limit
self.size = size
self.margin_crossover = margin_crossover
self.prob_crossover = prob_crossover
self.margin_mutation = margin_mutation
self.prob_mutation = prob_mutation
self.children = []
def init_population(self,sc):
population = []
for _ in range(0, self.size):
chromosome = self.init_chromosome(sc)
population.append(chromosome.collect())
return sc.parallelize(population)
def init_chromosome(self,sc):
long=self.graph_operations.get_HVSs().count()
chromosome = self.graph_operations.get_HVSs().zipWithIndex().map(
lambda x: node_values(x[1], [rnd.randint(-1, long - 1), -1]))
return chromosome
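    # Encoding note (added for clarity): each gene is a node_values(HVS index,
    # [candidate vertex position inside that HVS, target HVS index]) pair, and
    # a value of -1 means "no reassignment"; fitness() rewards genes that match
    # the connectivity-based optimum computed by get_optimal().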
def fitness(self, chromosome):
accurancy = 0
for i in range(0, len(chromosome)):
vertexes = self.graph_operations.get_HVSs().collect()[i]
j = 0
found = False
while (j < len(vertexes) and not found):
iden = vertexes[j]
intraconnection = self.graph_operations.getConnectionWithHVS(iden, self.graph_operations.get_HVSs().collect()[i])
interconnection = self.graph_operations.getMaxConnectionWithHVSs(iden, intraconnection)
if interconnection[0] != -1 and interconnection[1] != 0:
if chromosome[i].get_value()[0] == j:
found = True
chromosome[i].set_value([chromosome[i].get_value()[0], interconnection[0]])
accurancy = accurancy + 1
else:
j = j + 1
else:
j = j + 1
if found == False:
if chromosome[i].get_value()[0] == -1:
accurancy = accurancy + 1
return accurancy
def get_optimal(self, position):
vertexes = self.graph_operations.get_HVSs().collect()[position]
result = -1
inter = -1
j = 0
found = False
while (j < len(vertexes) and not found):
iden = vertexes[j]
intraconnection = self.graph_operations.getConnectionWithHVS(iden,
self.graph_operations.get_HVSs()[position])
interconnection = self.graph_operations.getMaxConnectionWithHVSs(iden, intraconnection)
if interconnection[0] != -1 and interconnection[1] != 0:
result = j
inter = interconnection[0]
found = True
else:
j = j + 1
return result, inter
def calculate_fitness(self,sc, population):
values = []
#values=population.map(lambda x:self.fitness(x)).collect()
for i in population.collect():
fit = self.fitness(i)
values.append(fit)
print values
return values
def completed_evolution(self, values):
for i in values:
if i == self.target:
return True
return False
def get_max_values(self, values):
best1 = 0;
best2 = 0
position1 = -1;
position2 = -1
for i in range(0, len(values)):
if values[i] > best1:
best2 = best1
best1 = values[i]
position1 = i
elif values[i] > best2:
best2 = values[i]
position2 = i
return position1, position2
def get_fittest_individuals(self, population, values):
position1, position2 = self.get_max_values(values)
fittest1 = population[position1]
fittest2 = population[position2]
return fittest1, fittest2
def new_individual(self, parent1, parent2):
child = self.crossover(parent1, parent2)
self.mutation(child)
return child
def log_individual(self, child):
self.children.append(child)
def reproduce(self, parent1, parent2):
self.children = []
pool = mp.Pool()
for _ in range(0, self.size):
pool.apply_async(self.new_individual, args=(parent1, parent2,), callback=self.log_individual)
pool.close()
pool.join()
def crossover(self, parent1, parent2):
child = []
cross = rnd.randint(0, 1)
        # The child is a blend of both parents.
if self.prob_crossover > cross:
space = int(len(parent1) * self.margin_crossover)
margin = rnd.randint(int(space / 2), space)
for i in range(0, margin):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
# value2 = parent1[i].get_value2()
new_part = node_values(iden, value)
child.append(new_part)
for i in range(margin, len(parent2)):
iden = parent2[i].get_iden()
value = parent2[i].get_value()
# value2 = parent2[i].get_value2()
new_part = node_values(iden, value)
child.append(new_part)
else:
            # The child is an exact copy of the best parent.
for i in range(len(parent1)):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
# value2 = parent1[i].get_value2()
new_part = node_values(iden, value)
child.append(new_part)
return child
def mutation(self, chromosome):
mutate = rnd.randint(0, 1)
if self.prob_mutation > mutate:
            # The child carries mutations in its genes.
margin = int(len(chromosome) * self.margin_mutation)
for _ in range(0, margin):
position = rnd.randint(0, len(chromosome) - 1)
optimal, interconnection = self.get_optimal(position)
if optimal == chromosome[position].get_value()[0]:
randomization = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
chromosome[position].set_value([randomization, -1])
else:
chromosome[position].set_value([optimal, interconnection])
def get_worst(self, values):
target = self.target
position = -1
for i in range(0, len(values)):
if values[i] < target:
target = values[i]
position = i
return position
def natural_selection(self, population, values):
for child in self.children:
position = self.get_worst(values)
if position != -1:
fit = self.fitness(child)
if fit > values[position]:
population.pop(position)
population.append(child)
values.pop(position)
values.append(fit)
def evolution(self,sc):
completed = False
population = self.init_population(sc)
fitness_values = self.calculate_fitness(sc,population)
while (self.counter < self.limit and not completed):
if (self.completed_evolution(fitness_values)):
completed = True
else:
parent1, parent2 = self.get_fittest_individuals(population, fitness_values)
union = sc.parallelize(parent1 + parent2)
#pool.apply_async(self.new_individual, args=(parent1, parent2,), callback=self.log_individual)
prueba1=[1,2,3,4]
self.children = []
self.children.append(union.map(lambda parent:self.new_individual(parent[0],parent[1])).collect())
self.reproduce(parent1, parent2)
self.natural_selection(population, fitness_values)
if self.counter % 10 == 0:
                    print(
                        "Accuracy at generation", self.counter, "is", max(fitness_values), "out of", self.target)
result = ""
for i in range(0, len(parent1)):
result = result + parent1[i].__str__() + " "
print(result)
fitness_values = self.calculate_fitness(sc,population)
self.counter = self.counter + 1
parent, _ = self.get_fittest_individuals(population, fitness_values)
return parent
class HVSInternalGenetic():
def __init__(self, graph_operations, limit=800, size=16, margin_crossover=0.6, prob_crossover=0.9,
margin_mutation=0.1, prob_mutation=0.4):
rnd.seed(0)
self.counter = 0
self.graph_operations = graph_operations
self.target = len(self.graph_operations.get_HVSs())
self.limit = limit
self.size = size
self.margin_crossover = margin_crossover
self.prob_crossover = prob_crossover
self.margin_mutation = margin_mutation
self.prob_mutation = prob_mutation
self.children = []
def init_population(self):
population = []
for _ in range(0, self.size):
chromosome = self.init_chromosome()
population.append(chromosome)
return population
def init_chromosome(self):
chromosome = []
for i in range(0, len(self.graph_operations.get_HVSs())):
value = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
relation = node_values(i, value)
chromosome.append(relation)
return chromosome
def fitness(self, chromosome):
accurancy = 0
for i in range(0, len(chromosome)):
hvs1 = self.graph_operations.get_HVSs()[i]
j = i
found = False
while (j < len(chromosome) and not found):
hvs2 = self.graph_operations.get_HVSs()[j]
intra_sim1 = self.graph_operations.getIntraSimilarity(hvs1)
intra_sim2 = self.graph_operations.getIntraSimilarity(hvs2)
inter_sim = self.graph_operations.getInterSimilarity(hvs1, hvs2)
if (inter_sim > intra_sim1 or inter_sim > intra_sim2):
if (chromosome[i].get_value() == j):
found = True
accurancy = accurancy + 1
else:
j = j + 1
else:
j = j + 1
if found == False:
if chromosome[i].get_value() == -1:
accurancy = accurancy + 1
return accurancy
def calculate_fitness(self, population):
values = []
for i in population:
fit = self.fitness(i)
values.append(fit)
return values
def completed_evolution(self, values):
for i in values:
if i == self.target:
return True
return False
def get_max_values(self, values):
        best1 = 0
        best2 = 0
        position1 = -1
        position2 = -1
for i in range(0, len(values)):
if values[i] > best1:
best2 = best1
best1 = values[i]
position1 = i
elif values[i] > best2:
best2 = values[i]
position2 = i
return position1, position2
def get_fittest_individuals(self, population, values):
position1, position2 = self.get_max_values(values)
fittest1 = population[position1]
fittest2 = population[position2]
return fittest1, fittest2
def new_individual(self, parent1, parent2):
child = self.crossover(parent1, parent2)
self.mutation(child)
return child
def log_individual(self, child):
self.children.append(child)
def reproduce(self, parent1, parent2):
self.children = []
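        # Build `size` offspring in parallel: apply_async schedules new_individual on
        # worker processes and log_individual appends each finished child to
        # self.children; close() + join() then wait for every worker to return.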
pool = mp.Pool()
for _ in range(0, self.size):
pool.apply_async(self.new_individual, args=(parent1, parent2,), callback=self.log_individual)
pool.close()
pool.join()
def crossover(self, parent1, parent2):
child = []
cross = rnd.randint(0, 1)
        # The child is a mix of both parents.
if self.prob_crossover > cross:
space = int(len(parent1) * self.margin_crossover)
margin = rnd.randint(int(space / 2), space)
for i in range(0, margin):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
for i in range(margin, len(parent2)):
iden = parent2[i].get_iden()
value = parent2[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
else:
            # The child is an exact copy of the best parent.
for i in range(len(parent1)):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
return child
def get_optimal(self, position):
result = -1
found = False
hvs1 = self.graph_operations.get_HVSs()[position]
j = position
while (j < len(self.graph_operations.get_HVSs()) and not found):
hvs2 = self.graph_operations.get_HVSs()[j]
intra_sim1 = self.graph_operations.getIntraSimilarity(hvs1)
intra_sim2 = self.graph_operations.getIntraSimilarity(hvs2)
inter_sim = self.graph_operations.getInterSimilarity(hvs1, hvs2)
if (inter_sim > intra_sim1 or inter_sim > intra_sim2):
result = j
found = True
else:
j = j + 1
return result
def mutation(self, chromosome):
mutate = rnd.randint(0, 1)
if self.prob_mutation > mutate:
            # The child's genes are mutated.
margin = int(len(chromosome) * self.margin_mutation)
for _ in range(0, margin):
position = rnd.randint(0, len(chromosome) - 1)
optimal = self.get_optimal(position)
if optimal == chromosome[position].get_value():
randomization = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
chromosome[position].set_value(randomization)
else:
chromosome[position].set_value(optimal)
def get_worst(self, values):
target = self.target
position = -1
for i in range(0, len(values)):
if values[i] < target:
target = values[i]
position = i
return position
def natural_selection(self, population, values):
for child in self.children:
position = self.get_worst(values)
if position != -1:
fit = self.fitness(child)
if fit > values[position]:
population.pop(position)
population.append(child)
values.pop(position)
values.append(fit)
print("fit", fit)
def evolution(self):
completed = False
population = self.init_population()
fitness_values = self.calculate_fitness(population)
while (self.counter < self.limit and not completed):
if (self.completed_evolution(fitness_values)):
completed = True
else:
parent1, parent2 = self.get_fittest_individuals(population, fitness_values)
self.reproduce(parent1, parent2)
self.natural_selection(population, fitness_values)
if self.counter % 10 == 0:
print("assssssssssssssssssssssssss")
print(
"La precision en la generacion", self.counter, "es de", max(fitness_values), "sobre", self.target)
result = ""
for i in range(0, len(parent1)):
result = result + parent1[i].__str__() + " "
print(result)
fitness_values = self.calculate_fitness(population)
self.counter = self.counter + 1
parent, _ = self.get_fittest_individuals(population, fitness_values)
return parent
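# Illustrative usage sketch (added for clarity, not part of the original pipeline).
# It assumes `graph_operations` exposes get_HVSs(), getIntraSimilarity() and
# getInterSimilarity() exactly as the class above uses them.
def _example_hvs_internal_genetic_usage(graph_operations):
    # Evolve the HVS-merge chromosome and return it as a plain id -> value mapping.
    genetic = HVSInternalGenetic(graph_operations, limit=200, size=8)
    best = genetic.evolution()
    return {gene.get_iden(): gene.get_value() for gene in best}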
class NonHubGenetic():
def __init__(self, graph_operations, limit=20, size=16, margin_crossover=0.6, prob_crossover=0.9,
margin_mutation=0.1, prob_mutation=0.4, artificial_mutation=True, mutation_accurancy=0.2):
rnd.seed(0)
self.graph_operations = graph_operations
self.target = len(self.graph_operations.get_non_hub_vertexes())
self.counter = 0
self.limit = limit
self.size = size
self.margin_crossover = margin_crossover
self.prob_crossover = prob_crossover
self.margin_mutation = margin_mutation
self.prob_mutation = prob_mutation
self.artificial_mutation = artificial_mutation
self.mutation_accurancy = mutation_accurancy
self.children = []
def init_population(self):
population = []
for _ in range(0, self.size):
chromosome = self.init_chromosome()
population.append(chromosome)
return population
def init_chromosome(self):
chromosome = []
for node in self.graph_operations.get_non_hub_vertexes():
value = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
info = node_values(node, value)
chromosome.append(info)
return chromosome
def fitness(self, chromo):
accurancy = 0
for value in chromo:
position = self.graph_operations.getMoreSimilarHVS(value.get_iden())
if position == value.get_value():
accurancy = accurancy + 1
return accurancy
def calculate_fitness(self, population,sc):
values = []
rddPopulation=sc.parallelize(population)
#values.append(self.fitness(rddPopulation.map(lambda x:x)))
for i in population:
fit = self.fitness(i)
values.append(fit)
return values
def mifuncion(self,x):
print(x)
def completed_evolution(self, values):
for i in values:
if i == self.target:
return True
return False
def get_max_values(self, values):
        best1 = 0
        best2 = 0
        position1 = -1
        position2 = -1
for i in range(0, len(values)):
if values[i] > best1:
best2 = best1
best1 = values[i]
position1 = i
elif values[i] > best2:
best2 = values[i]
position2 = i
return position1, position2
def get_fittest_individuals(self, population, values):
position1, position2 = self.get_max_values(values)
fittest1 = population[position1]
fittest2 = population[position2]
return fittest1, fittest2
def new_individual(self, parent1, parent2):
child = self.crossover(parent1, parent2)
self.mutation(child)
return child
def log_individual(self, child):
self.children.append(child)
def reproduce(self, parent1, parent2):
self.children = []
pool = mp.Pool()
for _ in range(0, self.size):
pool.apply_async(self.new_individual, args=(parent1, parent2,), callback=self.log_individual)
pool.close()
pool.join()
def get_worst(self, values):
target = self.target
position = -1
for i in range(0, len(values)):
if values[i] < target:
target = values[i]
position = i
return position
def crossover(self, parent1, parent2):
child = []
cross = rnd.randint(0, 1)
        # The child is a mix of both parents.
if self.prob_crossover > cross:
space = int(len(parent1) * self.margin_crossover)
margin = rnd.randint(int(space / 2), space)
for i in range(0, margin):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
for i in range(margin, len(parent2)):
iden = parent2[i].get_iden()
value = parent2[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
else:
            # The child is an exact copy of the best parent.
for i in range(len(parent1)):
iden = parent1[i].get_iden()
value = parent1[i].get_value()
new_part = node_values(iden, value)
child.append(new_part)
return child
def mutation(self, chromosome):
mutate = rnd.randint(0, 1)
if self.prob_mutation > mutate:
            # The child's genes are mutated.
margin = int(len(chromosome) * self.margin_mutation)
for _ in range(0, margin):
position = rnd.randint(0, len(chromosome) - 1)
iden = chromosome[position].get_iden()
optimal = self.graph_operations.getMoreSimilarHVS(iden)
if self.artificial_mutation == True:
prob = rnd.randint(0, 1)
if self.mutation_accurancy >= prob:
chromosome[position].set_value(optimal)
else:
randomization = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
chromosome[position].set_value(randomization)
else:
                    # Hack to cheat the mutation and reach the solution.
if chromosome[position].get_value() == optimal:
while True:
randomization = rnd.randint(-1, len(self.graph_operations.get_HVSs()) - 1)
if randomization != optimal:
chromosome[position].set_value(randomization)
break;
else:
chromosome[position].set_value(optimal)
def natural_selection(self, population, values):
for child in self.children:
position = self.get_worst(values)
if position != -1:
fit = self.fitness(child)
if fit > values[position]:
population.pop(position)
population.append(child)
values.pop(position)
values.append(fit)
def evolution(self,sc):
completed = False
population = self.init_population()
fitness_values = self.calculate_fitness(population,sc)
old_max_value = 0
max_value = max(fitness_values)
original_mutation_accurancy = self.mutation_accurancy
while (self.counter < self.limit and not completed):
if (self.completed_evolution(fitness_values)):
completed = True
else:
parent1, parent2 = self.get_fittest_individuals(population, fitness_values)
self.reproduce(parent1, parent2)
self.natural_selection(population, fitness_values)
if self.counter % 10 == 0:
if self.artificial_mutation == True:
if old_max_value >= max_value:
self.mutation_accurancy = self.mutation_accurancy + 0.1
elif self.mutation_accurancy > original_mutation_accurancy:
self.mutation_accurancy = self.mutation_accurancy - 0.1
print("assssssssssssssssssssssssss")
print("La precision en la generacion", self.counter, "es de", max_value, "sobre", self.target)
result = ""
for i in range(0, len(parent1)):
result = result + parent1[i].__str__() + " "
print(result)
fitness_values = self.calculate_fitness(population,sc)
old_max_value = max_value
max_value = max(fitness_values)
self.counter = self.counter + 1
parent, _ = self.get_fittest_individuals(population, fitness_values)
return parent
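# Illustrative usage sketch (added for clarity, not part of the original pipeline).
# It assumes an existing SparkContext `sc` and a `graph_operations` object exposing
# get_non_hub_vertexes(), get_HVSs() and getMoreSimilarHVS() as used above.
def _example_non_hub_genetic_usage(graph_operations, sc):
    # Assign every non-hub vertex to its most similar HVS (-1 means "no HVS").
    genetic = NonHubGenetic(graph_operations, limit=50, size=8)
    best = genetic.evolution(sc)
    return {gene.get_iden(): gene.get_value() for gene in best}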
def square_matrix_from_vector(matrix,square):
matrix = matrix.reshape((square,square))
return matrix
def read_graph(sc):
M = []
rows = 0
columns = 0
csv_filename = "./examples/matriz.csv"
distFile = sc.textFile(csv_filename)
line=sc.broadcast(distFile.flatMap(lambda x:x.strip().split(",")).map(lambda x:float(x)).collect()).value
columns = len(line)
    square = sqrt(columns)
    matrix = np.matrix(line)
    # a fractional square root means the flat vector cannot be reshaped into a square matrix
    b = abs(square) - abs(int(square))
    if rows != columns:
        if b == 0:
            matrix = square_matrix_from_vector(matrix, int(square))
else:
raise Exception('Matrix cannot be converted to square.')
return nx.from_numpy_matrix(matrix)
def main():
    conf = SparkConf().setAppName("hhhhuh").setMaster("local[*]")
sc = SparkContext(conf=conf)
G = read_graph(sc)
array = []
print (nx.info(G))
rddNodes=sc.parallelize(G.nodes())
#for i in range(0,rddNodes.count()):
# array.append(i)
# if((i%20==0)&(i!=0)):
# subgrafo = nx.subgraph(G, array)
# genetic_graph(subgrafo)
# array=[]
genetic_graph(G,sc)
if __name__ == '__main__':
main()
#subgrafo = nx.subgraph(G, [1, 2, 3, 4])
#genetic_graph(subgrafo) | apache-2.0 |
lyoshiwo/resume_job_matching | Step10_test_ensemble.py | 1 | 3786 | # encoding=utf8
import numpy as np
from sklearn import cross_validation
import pandas as pd
import os
import time
from keras.models import Sequential, model_from_json
import util
def score_lists(list_1, list_2):
count = 0
total = len(list_1)
print total
for i in range(total):
if list_1[i] == list_2[i]:
count += 1
return float(count) / total
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def get_esembel_score(name):
if os.path.exists(util.features_prefix + name + "_XXXYYY.pkl") is False:
print 'file does not exist'
exit()
[X_train, X_validate, X_test, y_train, y_validate, y_test] = pd.read_pickle(
util.features_prefix + name + '_XXXYYY.pkl')
import xgboost as xgb
rf_clf_2 = pd.read_pickle(util.models_prefix + name+'_rf.pkl')
list_all = []
rf_2_list = rf_clf_2.predict(X_test)
from sklearn.feature_selection import SelectFromModel
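    # SelectFromModel with prefit=True wraps the already-trained forest; get_support()
    # returns a boolean mask of the features whose importance exceeds the default
    # (mean-importance) threshold, so sum(temp) is the number of selected features.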
model = SelectFromModel(rf_clf_2, prefit=True)
temp = model.get_support()
print sum(temp)
list_all.append(rf_2_list)
print rf_clf_2.score(X_test, y_test)
xgb_2 = xgb.Booster({'nthread': 4}) # init model
xgb_2.load_model(util.models_prefix +name+ '_xgb.pkl') # load data
print len(xgb_2.get_fscore().keys())
dtest = xgb.DMatrix(X_test)
xgb_2_test = xgb_2.predict(dtest)
list_all.append(xgb_2_test)
print score_lists(xgb_2_test, y_test)
from keras.utils import np_utils
import copy
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
X_semantic = np.array(copy.deepcopy(X_test[:, range(95, 475)]))
X_manual = np.array(copy.deepcopy(X_test[:, range(0, 95)]))
X_cluster = np.array(copy.deepcopy(X_test[:, range(475, 545)]))
X_document = np.array(copy.deepcopy(X_test[:, range(545, 547)]))
X_document[:, [0]] = X_document[:, [0]] + train_X[:, [-1]].max()
X_semantic = X_semantic.reshape(X_semantic.shape[0], 10, -1)
X_semantic_1 = np.zeros((X_semantic.shape[0], X_semantic.shape[2], X_semantic.shape[1]))
for i in range(int(X_semantic.shape[0])):
X_semantic_1[i] = np.transpose(X_semantic[i])
json_string = pd.read_pickle(util.models_prefix +name+ '_json_string_cnn.pkl')
model_cnn = model_from_json(json_string)
model_cnn.load_weights(util.models_prefix + name+'_nn_weight_cnn.h5')
cnn_list = model_cnn.predict_classes([X_document, X_cluster, X_manual, X_semantic_1])
# cnn_list_prob = model_cnn.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
kk = list(cnn_list)
list_all.append(kk)
print score_lists(kk, y_test)
json_string = pd.read_pickle(util.models_prefix + name + '_json_string_lstm.pkl')
model_lstm = model_from_json(json_string)
model_lstm.load_weights(util.models_prefix + name + '_nn_weight_lstm.h5')
lstm_list = model_lstm.predict_classes([X_document, X_cluster, X_manual, X_semantic_1])
# cnn_list_prob = model_cnn.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
kk = list(lstm_list)
list_all.append(kk)
print score_lists(kk, y_test)
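    # Hard-voting ensemble: for each test sample, count how often every label is
    # predicted across the four models (random forest, XGBoost, CNN, LSTM) and keep
    # the most voted label; ties fall back to the dict's iteration order.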
list_ensemble = []
for i in range(len(y_test)):
dict_all = {}
for z in range(len(list_all)):
dict_all[list_all[z][i]] = dict_all.setdefault(list_all[z][i], 0) + 1
tmp_list = dict_all.items()
list_ensemble.append(sorted(tmp_list, lambda a, b: -cmp(a[1], b[1]))[0][0])
print score_lists(list_ensemble, y_test)
print '**************************'
if __name__ == "__main__":
for name in ['degree', 'position', 'salary', 'size']:
get_esembel_score(name)
# xg
# 2016 - 07 - 16
# 23:39:28
# 2016 - 07 - 16
# 23:58:37
# 2016 - 07 - 17
# 00:34:06
| apache-2.0 |
RPGOne/scikit-learn | sklearn/datasets/mldata.py | 31 | 7856 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname :
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test06_deep_reinforcement_learning/test_result_ppo_multipendulum_04052018/test4.py | 1 | 6441 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
import gym_multipendulum
EP_MAX = 3000 # Number of episodes
EP_LEN = 200 # Length of a single episode
GAMMA = 0.9 # Discount factor
A_LR = 0.0001 # Actor learning rate
C_LR = 0.0002 # Critic learning rate
BATCH = 32 # Batch size
A_UPDATE_STEPS = 10 # Actor update steps
C_UPDATE_STEPS = 10 # Critic update steps
S_DIM = 6 # Number of states
A_DIM = 3 # Number of actions
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
l1 = tf.layers.dense(self.tfs, 128, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / oldpi.prob(self.tfa)
surr = ratio * self.tfadv
if METHOD['name'] == 'kl_pen':
self.tflam = tf.placeholder(tf.float32, None, 'lambda')
kl = tf.distributions.kl_divergence(oldpi, pi)
self.kl_mean = tf.reduce_mean(kl)
self.aloss = -(tf.reduce_mean(surr - self.tflam * kl))
else: # clipping method, find this is better
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))
with tf.variable_scope('atrain'):
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
tf.summary.FileWriter("log/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
def update(self, s, a, r):
self.sess.run(self.update_oldpi_op)
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
_, kl = self.sess.run(
[self.atrain_op, self.kl_mean],
{self.tfs: s, self.tfa: a, self.tfadv: adv, self.tflam: METHOD['lam']})
                if kl > 4*METHOD['kl_target']:  # this is in Google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10) # sometimes explode, this clipping is my solution
else: # clipping method, find this is better (OpenAI's paper)
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
# update critic
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 128, tf.nn.relu, trainable=trainable)
mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
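# Minimal NumPy sketch (added for illustration, not used by the training loop below):
# the clipped surrogate objective that the 'clip' branch of the PPO class optimizes,
# written for per-sample probability ratios pi/pi_old and their advantages.
def _clipped_surrogate_example(ratio, advantage, epsilon=0.2):
    ratio = np.asarray(ratio, dtype=np.float64)
    advantage = np.asarray(advantage, dtype=np.float64)
    clipped = np.clip(ratio, 1.0 - epsilon, 1.0 + epsilon)
    # PPO maximizes the elementwise minimum; the class above minimizes its negative.
    return np.mean(np.minimum(ratio * advantage, clipped * advantage))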
# env = gym.make('Pendulum-v0').unwrapped
env = gym.make('multipendulum-v0').unwrapped
ppo = PPO()
all_ep_r = []
for ep in range(EP_MAX):
s = env.reset()
buffer_s, buffer_a, buffer_r = [], [], []
ep_r = 0
for t in range(EP_LEN): # in one episode
# env.render()
a = ppo.choose_action(s)
s_, r, done, _ = env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+30)/30) # normalize reward, find to be useful
# buffer_r.append((r+8)/8) # normalize reward, find to be useful
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
v_s_ = ppo.get_v(s_)
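            # Bootstrap from the critic's value of the last state, then accumulate
            # discounted returns backwards through the buffer: R_t = r_t + GAMMA * R_{t+1}.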
discounted_r = []
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
ppo.update(bs, ba, br)
if ep == 0: all_ep_r.append(ep_r)
else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
print(
'Ep: %i' % ep,
"|Ep_r: %i" % ep_r,
("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
)
# plt.style.use('dark_background')
plt.plot(np.arange(len(all_ep_r)), all_ep_r)
plt.xlabel('Episode')
plt.ylabel('Moving averaged episode reward')
plt.show() | mit |
minhlongdo/scipy | scipy/special/add_newdocs.py | 4 | 71189 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtria(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(x)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtri(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellik(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedcor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
        hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
    Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
    Exponentially scaled Bessel function of the first kind of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
    k1(x)
    Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
    Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
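    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    definition above:
    >>> from scipy.special import kl_div
    >>> kl_div(1.0, 1.0)   # x*log(x/y) - x + y = 0 - 1 + 1 = 0.0
    >>> kl_div(0.0, 2.0)   # x = 0 branch: returns y = 2.0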
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
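    Examples
    --------
    A minimal usage sketch; the values in the comments follow from
    logit(p) = log(p/(1-p)) as defined above:
    >>> from scipy.special import logit
    >>> logit(0.5)   # log(1) = 0.0
    >>> logit(0.0)   # -inf, per the definition above
    >>> logit(1.0)   # +inf, per the definition above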
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
    Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
    Returns the modified Struve function Lv(x) of order v at x; x must
    be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
    ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
    ncfdtridfd : Calculate dfd, given CDF and iCDF values.
    ncfdtridfn : Calculate dfn, given CDF and iCDF values.
    ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
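    Examples
    --------
    A minimal usage sketch; the value in the comment follows from the
    symmetry of the standard Gaussian:
    >>> from scipy.special import ndtr
    >>> ndtr(0.0)   # half of the probability mass lies below 0: 0.5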
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
    nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
    nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
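    Examples
    --------
    A minimal usage sketch; the first value is exact by symmetry and the
    second is the familiar two-sided 95% quantile (approximate):
    >>> from scipy.special import ndtri
    >>> ndtri(0.5)     # 0.0
    >>> ndtri(0.975)   # approximately 1.96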
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
    Returns (d, dp) with the parabolic cylinder function Dv(x) in d and its
    derivative, Dv'(x), in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
    Returns (v, vp) with the parabolic cylinder function Vv(x) in v and its
    derivative, Vv'(x), in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
    The Pochhammer symbol (rising factorial) is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
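    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    product form above:
    >>> from scipy.special import poch
    >>> poch(3, 2)   # 3 * 4 = 12.0
    >>> poch(3, 0)   # empty product: 1.0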
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
    Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
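    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    formula above:
    >>> from scipy.special import pseudo_huber
    >>> pseudo_huber(1.0, 0.0)    # delta**2 * (sqrt(1 + 0) - 1) = 0.0
    >>> pseudo_huber(1.0, 1e-3)   # ~0.5 * r**2 for |r| << delta (approximately 5e-7)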
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
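    Examples
    --------
    A minimal usage sketch; the values in the comments are approximate:
    >>> from scipy.special import radian
    >>> radian(180, 0, 0)   # pi, approximately 3.14159
    >>> radian(90, 0, 0)    # pi/2, approximately 1.5708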
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
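    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    definition above:
    >>> from scipy.special import rel_entr
    >>> rel_entr(1.0, 1.0)   # x*log(x/y) = 0.0
    >>> rel_entr(0.0, 3.0)   # x = 0 branch: 0.0
    >>> rel_entr(2.0, 1.0)   # 2*log(2), approximately 1.386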
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
    Reciprocal of the gamma function
    Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
    Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x; x must be
    positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
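    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    definition above:
    >>> from scipy.special import xlogy
    >>> xlogy(0.0, 0.0)   # 0.0 by convention, even though log(0) = -inf
    >>> xlogy(2.0, 1.0)   # 2 * log(1) = 0.0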
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
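    Examples
    --------
    A minimal usage sketch; the values in the comments follow from the
    definition above:
    >>> from scipy.special import xlog1py
    >>> xlog1py(0.0, -1.0)   # 0.0 by convention, even though log1p(-1) = -inf
    >>> xlog1py(2.0, 0.0)    # 2 * log1p(0) = 0.0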
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
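    Examples
    --------
    A minimal usage sketch; the value in the comment is approximate and
    follows from the series above:
    >>> from scipy.special import zetac
    >>> zetac(2.0)   # pi**2/6 - 1, approximately 0.6449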
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| bsd-3-clause |
jmmease/pandas | asv_bench/benchmarks/hdfstore_bench.py | 6 | 3924 | from .pandas_vb_common import *
import os
class HDF5(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000),},
index=self.index)
self.df_mixed = DataFrame(
{'float1': randn(25000), 'float2': randn(25000),
'string1': (['foo'] * 25000),
'bool1': ([True] * 25000),
'int1': np.random.randint(0, 250000, size=25000),},
index=self.index)
self.df_wide = DataFrame(np.random.randn(25000, 100))
self.df2 = DataFrame({'float1': randn(25000), 'float2': randn(25000)},
index=date_range('1/1/2000', periods=25000))
self.df_wide2 = DataFrame(np.random.randn(25000, 100),
index=date_range('1/1/2000', periods=25000))
self.df_dc = DataFrame(np.random.randn(10000, 10),
columns=[('C%03d' % i) for i in range(10)])
self.f = '__test__.h5'
self.remove(self.f)
self.store = HDFStore(self.f)
self.store.put('fixed', self.df)
self.store.put('fixed_mixed', self.df_mixed)
self.store.append('table', self.df2)
self.store.append('table_mixed', self.df_mixed)
self.store.append('table_wide', self.df_wide)
self.store.append('table_wide2', self.df_wide2)
def teardown(self):
self.store.close()
def remove(self, f):
try:
os.remove(self.f)
except:
pass
def time_read_store(self):
self.store.get('fixed')
def time_read_store_mixed(self):
self.store.get('fixed_mixed')
def time_write_store(self):
self.store.put('fixed_write', self.df)
def time_write_store_mixed(self):
self.store.put('fixed_mixed_write', self.df_mixed)
def time_read_store_table_mixed(self):
self.store.select('table_mixed')
def time_write_store_table_mixed(self):
self.store.append('table_mixed_write', self.df_mixed)
def time_read_store_table(self):
self.store.select('table')
def time_write_store_table(self):
self.store.append('table_write', self.df)
def time_read_store_table_wide(self):
self.store.select('table_wide')
def time_write_store_table_wide(self):
self.store.append('table_wide_write', self.df_wide)
def time_write_store_table_dc(self):
self.store.append('table_dc_write', self.df_dc, data_columns=True)
def time_query_store_table_wide(self):
start = self.df_wide2.index[10000]
stop = self.df_wide2.index[15000]
self.store.select('table_wide', where="index > start and index < stop")
def time_query_store_table(self):
start = self.df2.index[10000]
stop = self.df2.index[15000]
self.store.select('table', where="index > start and index < stop")
def time_store_repr(self):
repr(self.store)
def time_store_str(self):
str(self.store)
def time_store_info(self):
self.store.info()
class HDF5Panel(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
self.p = Panel(randn(20, 1000, 25),
items=[('Item%03d' % i) for i in range(20)],
major_axis=date_range('1/1/2000', periods=1000),
minor_axis=[('E%03d' % i) for i in range(25)])
self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('p1', self.p)
def teardown(self):
self.store.close()
def remove(self, f):
try:
os.remove(self.f)
except:
pass
def time_read_store_table_panel(self):
self.store.select('p1')
def time_write_store_table_panel(self):
self.store.append('p2', self.p)
| bsd-3-clause |
ninoxcello/mscs710-project | src/SwaggerAPI/swagger_server/controllers/dataset_controller.py | 1 | 9358 | import connexion
import six
from swagger_server.models.api_response import ApiResponse # noqa: E501
from swagger_server.models.inline_image import InlineImage # noqa: E501
from swagger_server.models.inline_response200 import InlineResponse200 # noqa: E501
from swagger_server.models.table import Table # noqa: E501
from swagger_server import util
import sys
import os
sys.path.append(os.path.abspath("C:/Users/Matt/Documents/GitHub/mscs710-project/src"))
from utils import *
import datetime
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import warnings
from matplotlib import style
from subprocess import check_output
import models
import utils
import visuals
warnings.filterwarnings("ignore")
style.use('ggplot')
plt.rcParams['figure.figsize'] = (12.0, 8.0)
#Globals
currencies = visuals.load()
coin_type = 'bitcoin'
coin_feat = ['Close']
fileName = 'bitcoin_price.csv'
input_dir = "../input"
# Unpack in the order used throughout this module: train/test/forecast
# samples, train/test labels, and the source DataFrame.
x_train, x_test, x_recent, y_train, y_test, df = utils.load_data(input_dir, fileName)
graphType = 1
corr_choice = 1
model_type = 1
operation_type = 1
model = None  # assigned once a model type is chosen via the model-type endpoint
def dataset_currency_selection_currency_name_post(currencyName): # noqa: E501
"""Selects a specific currency type
The currency type should be either bitcoin, bitconnect, dash, ethereum, iota, litecoin, monero, nem, neo, numeraire, omisego, qtum, ripple, stratis, or waves # noqa: E501
:param currencyName: Should be equal to either bitcoin, bitconnect, dash, ethereum, iota, litecoin, monero, nem, neo, numeraire, omisego, qtum, ripple, stratis, or waves
:type currencyName: str
:rtype: ApiResponse
"""
#print('Enter Currency File Name: ')
# name = input()
global fileName
fileName = currencyName + '_price.csv'
global input_dir
global x_train, x_test, x_recent, y_train, y_test, df
x_train, x_test, x_recent, y_train, y_test, df = utils.load_data(input_dir, fileName)
print('Building Summary...')
summary = ''
    summary += 'Date of newest data: {}\n'.format(df.index[0])
    summary += 'Date of oldest data: {}\n'.format(df.index[-1])
    summary += str(x_train.shape[0]) + ' training samples.\n'
    summary += str(x_test.shape[0]) + ' test samples.\n'
    summary += 'Predicting {} days\n'.format(x_recent.shape[0])
    summary += 'Train sample shape: ' + str(x_train.shape) + '\n'
    summary += 'Test sample shape: ' + str(x_test.shape) + '\n'
    summary += 'Train label shape: ' + str(y_train.shape) + '\n'
    summary += 'Test label shape: ' + str(y_test.shape) + '\n'
    summary += 'Sample Data:\n'
    summary += str(df.describe())
print('Successfully executed method')
return summary
def dataset_get_stats_get(): # noqa: E501
"""Get Statistics for the Dataset
Returns values of count, mean, median, mode, min, max, standard deviation (std) for the dataset # noqa: E501
:rtype: InlineResponse200
"""
return 'Method Not Supported; Please use the currency selection POST request instead'
def dataset_get_table_table_name_post(tableName): # noqa: E501
"""Returns the specified table
Returns the HTML Representation of the table. # noqa: E501
:param tableName: Should be equal to either bitcoin, bitconnect, dash, ethereum, iota, litecoin, monero, nem, neo, numeraire, omisego, qtum, ripple, stratis, or waves
:type tableName: str
:rtype: Table
"""
print(currencies[tableName].head())
return str(currencies[tableName].head())
def dataset_graph_correlation_correlation_type_post(correlationType): # noqa: E501
"""Returns a graph utilizing the selected correlation type
1. Spearman, 2. Pearson, 3. Kendall # noqa: E501
:param correlationType: Must be a number equal to 1, 2, or 3
:type correlationType: int
:rtype: InlineImage
"""
global corr_choice
corr_choice = correlationType
if corr_choice == 1:
return visuals.plot_correlation('spearman')
elif corr_choice == 2:
return visuals.plot_correlation('pearson')
elif corr_choice == 3:
return visuals.plot_correlation('kendall')
return {"status": 400,
"message": 'Error: The given number was not 1, 2, or 3'}
def dataset_graph_selection_graph_selection_post(graphSelection): # noqa: E501
"""Selects a graph type
1. Trend Curve, 2. Candlestick, 3. Correlation Map # noqa: E501
:param graphSelection: Must be a number equal to 1, 2, or 3
:type graphSelection: int
:rtype: ApiResponse
"""
global graphType
graphType = graphSelection
if graphType == 1:
visuals.plot_trend(currencies, coin_type, coin_feat)
elif graphType == 2:
visuals.plot_candlestick(currencies, coin_type, coin_feat)
elif graphType == 3:
if corr_choice == 1:
return visuals.plot_correlation('spearman')
elif corr_choice == 2:
return visuals.plot_correlation('pearson')
elif corr_choice == 3:
return visuals.plot_correlation('kendall')
return {"status": 400,
"message": 'Error: The given number was not 1, 2, or 3'}
def dataset_model_type_model_type_post(modelType): # noqa: E501
"""Selects a model type
1. Linear Regression, 2. Support Vector Regression, 3. Multilayer Perceptron, 4. Gradient Boosting Regression # noqa: E501
:param modelType: Must be a number equal to 1, 2, 3, or 4
:type modelType: int
:rtype: ApiResponse
"""
global model_type
model_type = modelType
global model
if model_type == 1:
model = models.LR(x_train, y_train, x_test, y_test, x_recent)
return {"status": 200,
"message": 'OK: Linear Regression model selected'}
elif model_type == 2:
model = models.SVR(x_train, y_train, x_test, y_test, x_recent)
return {"status": 200,
"message": 'OK: Support Vector Regression model selected'}
elif model_type == 3:
model = models.MLP(x_train, y_train, x_test, y_test, x_recent)
model.build()
return {"status": 200,
"message": 'OK: Multilayer Perceptron model selected'}
elif model_type == 4:
model = models.GBR(x_train, y_train, x_test, y_test, x_recent)
return {"status": 200,
"message": 'OK: Gradient Boosting Regression model selected'}
return {"status": 400,
"message": 'Error: The given number was not 1, 2, 3, or 4'}
def dataset_operation_type_operation_type_post(operationType): # noqa: E501
"""Selects what operation to do, either training, testing, or prediction
1. Train, 2. Test, 3. Predict # noqa: E501
:param operationType: Must be a number equal to 1, 2, or 3
:type operationType: int
:rtype: ApiResponse
"""
global operation_type
operation_type = operationType
global model
if model_type == 1:
model = models.LR(x_train, y_train, x_test, y_test, x_recent)
print('Linear Regression model selected.\n')
elif model_type == 2:
model = models.SVR(x_train, y_train, x_test, y_test, x_recent)
print('Support Vector Regression model selected.\n')
elif model_type == 3:
model = models.MLP(x_train, y_train, x_test, y_test, x_recent)
model.build()
print('Multilayer Perceptron model selected.\n')
elif model_type == 4:
model = models.GBR(x_train, y_train, x_test, y_test, x_recent)
print('Gradient Boosting Regression model selected.\n')
if operation_type == 1:
print('Training initiated...\n')
model.train()
return {"status": 200,
"message": 'OK'}
elif operation_type == 2:
print('Evaluating model on test data...\n')
model.test()
return {"status": 200,
"message": 'OK'}
elif operation_type == 3:
print('Predicting future values...\n')
preds = model.predict()
print('Forecast Plot')
return utils.forecast_plot(df, preds)
return {"status": 400,
"message": 'Error: The given number was not 1, 2, or 3'}
def dataset_train_currency_name_post(currencyName): # noqa: E501
"""Trains based on the currency selected
Should be either bitcoin, bitconnect, dash, ethereum, iota, litecoin, monero, nem, neo, numeraire, omisego, qtum, ripple, stratis, or waves # noqa: E501
:param currencyName: Should be equal to either bitcoin, bitconnect, dash, ethereum, iota, litecoin, monero, nem, neo, numeraire, omisego, qtum, ripple, stratis, or waves
:type currencyName: str
:rtype: ApiResponse
"""
#print('Enter Currency File Name: ')
# name = input()
global fileName
fileName = currencyName+"_price.csv"
global x_train, x_test, x_recent, y_train, y_test, df
x_train, x_test, x_recent, y_train, y_test, df = utils.load_data(input_dir, fileName)
print('---------------------------------------')
print(x_train.shape[0], 'training samples.')
print(x_test.shape[0], 'test samples.')
print('Predicting {} days'.format(x_recent.shape[0]))
print('Train sample shape: ', x_train.shape)
print('Test sample shape: ', x_test.shape)
print('Train label shape:', y_train.shape)
print('Test label shape:', y_test.shape)
return {"status": 200,
"message": 'OK'}
| mit |
RDCEP/ggcmi | bin/maprescaled/mapplot.par.py | 1 | 4809 | #!/usr/bin/env python
# import modules
import re, matplotlib, sys
from os import listdir
from os.path import split, sep, splitext
import matplotlib.pyplot as plt
from netCDF4 import Dataset as nc
from optparse import OptionParser
from mpl_toolkits.basemap import Basemap
from numpy import logical_and, arange, meshgrid, double, ceil
parser = OptionParser()
parser.add_option("-b", "--batch", dest = "batch", default = "1", type = "int",
help = "Batch to process")
parser.add_option("-n", "--numbatches", dest = "num_batches", default = "64", type = "int",
help = "Total number of batches")
parser.add_option("-d", "--dir", dest = "dir", default = "", type = "string",
help = "Directory of netCDF files")
parser.add_option("-t", "--time", dest = "time", default = "", type = "string",
help = "Time to plot ('all' = all years)")
parser.add_option("-s", "--scen", dest = "scen", default = "", type = "string",
help = "Name of scenario to plot")
parser.add_option("-r", "--irr", dest = "irr", default = "", type = "string",
help = "Name of irrigation type to plot")
parser.add_option("-v", "--var", dest = "var", default = "", type = "string",
help = "Variable to plot")
parser.add_option("-l", "--maplimits", dest = "maplimits", default = "", type = "string",
help = "Comma-separated of lat0,lat1,lon0,lon1 limits (default = blank)")
parser.add_option("-c", "--cblimits", dest = "cblimits", default = "", type = "string",
help = "Comma-separated of colorbar limits (default = blank)")
parser.add_option("-o", "--outputdir", dest = "outputdir", default = "", type = "string",
help = "Output directory")
options, args = parser.parse_args()
files = [f for f in listdir(options.dir) if f.endswith('.nc4')]
nfiles = len(files)
batch = options.batch # find out start and end indices for batch
numbatches = options.num_batches
bz = int(ceil(double(nfiles) / numbatches))
si = bz * (batch - 1)
ei = nfiles if batch == numbatches else min(si + bz, nfiles)
if si >= nfiles: # no work for processor to do
print 'No jobs for processor to perform. Exiting . . .'
sys.exit()
files = files[si : ei] # select files for batch
nfiles = len(files)
for i in range(nfiles):
filein = options.dir + sep + files[i]
crop = split(filein)[1].split('_')[3]
with nc(filein) as f:
lat = f.variables['lat'][:]
lon = f.variables['lon'][:]
scen = f.variables['scen'].long_name.split(', ')
irr = f.variables['irr'].long_name.split(', ')
time = f.variables['time'][:]
tunits = f.variables['time'].units
var = f.variables[options.var + '_' + crop]
varunits = var.units if 'units' in var.ncattrs() else ''
var = var[:]
scen_idx = scen.index(options.scen)
irr_idx = irr.index(options.irr)
var = var[:, :, :, scen_idx, irr_idx]
time += int(re.findall(r'\d+', tunits)[0])
if options.time == 'all':
var = var.mean(axis = 0)
years_str = years = str(time[0]) + '-' + str(time[-1])
else:
y = options.time.split('-')
if len(y) == 1:
var = var[time == int(y[0])].mean(axis = 0)
years_str = y[0]
elif len(y) == 2:
tidx = logical_and(time >= int(y[0]), time <= int(y[1]))
var = var[tidx].mean(axis = 0)
years_str = str(time[tidx][0]) + '-' + str(time[tidx][-1])
else:
raise Exception('Unrecognized -t option')
# get latitude limits
if options.maplimits != '':
lat0, lat1, lon0, lon1 = [double(l) for l in options.maplimits.split(',')]
else:
lat0 = -90
lat1 = 90
# get colorbar limits
if options.cblimits != '':
pmin, pmax = [double(l) for l in options.cblimits.split(',')]
else:
pmin = var.min()
pmax = var.max()
# plot
m = Basemap(llcrnrlon = lon0, llcrnrlat = lat0, urcrnrlon = lon1, urcrnrlat = lat1, \
resolution = 'c', projection = 'cyl')
lons, lats = meshgrid(lon, lat)
x, y = m(lons, lats)
cs = m.pcolor(x, y, var, vmin = pmin, vmax = pmax, cmap = matplotlib.cm.YlGn)
cbar = m.colorbar(cs, location = 'right')
m.drawcoastlines()
m.drawstates(linewidth = 0.2)
m.drawmapboundary()
m.drawcountries(linewidth = 0.2)
m.drawparallels(arange(90, -110, -30), labels = [1, 0, 0, 0])
m.drawmeridians(arange(-180, 180, 60), labels = [0, 0, 0, 1])
plt.title(options.var + ' (' + varunits + '), ' + options.scen + '-' + options.irr + ', ' + years_str)
# save
plt.savefig(options.outputdir + sep + splitext(files[i])[0] + '.' + options.var + '.png')
plt.close() | agpl-3.0 |
acimmarusti/isl_exercises | chap5/chap5ex8.py | 1 | 2414 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split, LeaveOneOut, KFold
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
import statsmodels.formula.api as smf
#Calculated mean error on validation sets#
def mean_cv_err(x_data, y_data, cvobj, regobj):
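    # Reshape the inputs to the 2-D column layout scikit-learn estimators
    # expect, then fit `regobj` on each training split produced by `cvobj`
    # and return the held-out mean squared error averaged across folds.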
data_size = len(x_data)
data_shape = x_data.shape
if len(data_shape) > 1:
xdata = np.reshape(np.array(x_data), data_shape)
else:
xdata = np.reshape(np.array(x_data), (data_size, 1))
ydata = np.reshape(y_data, (data_size, 1))
cv_errs = []
for train_idx, test_idx in cvobj.split(xdata):
xtrain = xdata[train_idx]
xtest = xdata[test_idx]
ytrain = ydata[train_idx]
ytest = ydata[test_idx]
res_reg = regobj.fit(xtrain, ytrain)
pred_reg = res_reg.predict(xtest)
#Reshape necessary because predition produces a (1, n) numpy array, while ytest is (n, 1)#
cv_errs.append(np.mean(np.power(np.reshape(ytest, pred_reg.shape) - pred_reg, 2)))
mean_err_out = np.mean(cv_errs)
return mean_err_out
#LOOCV strategy#
def loocv_err(x_data, y_data):
#Leave One Out Cross-validation#
loo = LeaveOneOut()
llreg = LinearRegression()
return mean_cv_err(x_data, y_data, loo, llreg)
#Simulated Data#
np.random.seed(1)
x = np.random.standard_normal(100)
y = x - 2 * np.square(x) + np.random.standard_normal(100)
data = pd.DataFrame()
data['y'] = y
data['x'] = x
data['x2'] = np.square(x)
data['x3'] = np.power(x, 3)
data['x4'] = np.power(x, 4)
plt.scatter(x, y)
plt.xlabel('x')
plt.ylabel('y')
#Compute LOOCV errors#
print('\nLOOCV error for linear model')
print(loocv_err(data['x'], data['y']))
print('\nLOOCV error for quadratic model')
print(loocv_err(data[['x','x2']], data['y']))
print('\nLOOCV error for cubic model')
print(loocv_err(data[['x','x2','x3']], data['y']))
print('\nLOOCV error for quartic model')
print(loocv_err(data[['x','x2','x3','x4']], data['y']))
#Linear regression#
linfit = smf.ols(formula='y ~ x + np.power(x, 2) + np.power(x, 3) + np.power(x, 4)', data=data).fit()
print(linfit.summary())
plt.show()
| gpl-3.0 |
sgranitz/northwestern | predict420/grex3_Hotel_Reviews.py | 2 | 4910 | # Stephan Granitz [ GrEx3 ]
# Import libraries
import pandas as pd
import numpy as np
import json
import glob
import os
import re
import pickle
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
# Starter: Get a Look At Some of The Hotel Data
# Read what's in the hotel file 100506.json
folder = 'C:/Users/sgran/Desktop/GrEx3/'
os.chdir(folder)
io1 = folder + '100506.json'
data = json.load(open(io1))
print(type(data), ' - ', data.keys())
reviews = data['Reviews']
info = data['HotelInfo']
print(type(reviews), ' - ', type(info))
print(info.keys())
print(info['HotelID'], ' - ', len(reviews), ' - ', type(reviews[0]))
print(reviews[0].keys())
print(reviews[0]['Ratings'])
pd.io.json.json_normalize(reviews).head(3).transpose().head(8)
# Part 1: Numeric Peceptual Mapping Data
def check_json(file):
try:
json.load(file)
except ValueError:
return False
return True
def check_key(dictionary, key):
if key in dictionary.keys():
return dictionary[key]
return np.nan
json_list = glob.glob('*.json')
# (a) Build it as a pandas DataFrame.
# DataFrame should have a row for each hotel review
hotel_rev = pd.DataFrame()
for json_file in json_list:
with open(json_file) as json_data:
if check_json(json_data) == False:
json_list.remove(json_file)
next
path = folder + json_file
data = json.load(open(path))
info = data['HotelInfo']
hotel_name = check_key(info, 'Name')
hotel_id = check_key(info, 'HotelID')
if pd.isnull(hotel_name):
hotel_name = re.compile(r'(?<=\-)([A-Z].*?)(?=\-)').search(
info['HotelURL']).group().replace('_', ' ')
reviews = data['Reviews']
revs_df = pd.io.json.json_normalize(reviews)
revs_df['HotelName'] = hotel_name
revs_df['HotelID'] = hotel_id
hotel_rev = hotel_rev.append(revs_df)
hotel_rev.replace('-1', np.nan, inplace=True)
print(hotel_rev.info())
print(hotel_rev['HotelName'].unique())
hotel_rev.head(3).transpose().head(6)
cols = ['HotelID', 'HotelName', 'Date', 'ReviewID', 'Author']
rtg_cols = [col for col in list(hotel_rev) if col.startswith('Ratings')]
cols += rtg_cols
# (b) Report the number of reviews for each hotel in the DataFrame.
reviews_df = hotel_rev[cols]
print(reviews_df['HotelName'].value_counts())
# (b) Calculate and report statistics describing
# the distribution of the overall rating received by the hotels.
rating_stats = pd.DataFrame()
for col in reviews_df[rtg_cols]:
rating_stats[col] = pd.to_numeric(reviews_df[col], errors='coerce')
rating_stats.describe().transpose()
# (c) Save your DataFrame by pickling it,
# and verify that your DataFrame was saved correctly.
reviews_out = open('hotel_reviews', 'wb')
pickle.dump(reviews_df, reviews_out)
reviews_out.close()
test_pickle = open('hotel_reviews', 'rb')
valid = pickle.load(test_pickle)
# Part 2: Text Data for Perceptual Mapping
stop_words = set(stopwords.words('english'))
# 1 Create one string of the contents of comments about the hotel
# 2 Clean the string of all html tags, punctuation, etc.
# 3 Convert the string to a list of words.
# 4 Remove all stop words from the list
# 5 You might want to do word stemming
# 6 Create a dict fom the list in which the keys
# are the "content" words, and their values are
# the number of times each word occurs.
hotel_dict = {}
for hotel in hotel_rev['HotelID'].unique():
temp_df = hotel_rev.loc[hotel_rev['HotelID'] == hotel]
words = temp_df['Content'].str.cat(sep=' ')
words_nohtml = re.compile('<.*?>').sub('', words)
words_az = word_tokenize(
re.compile('[^a-zA-Z]').sub(' ', words_nohtml).lower())
words_filtered = []
ps = PorterStemmer()
for word in words_az:
if word not in stop_words:
words_filtered.append(ps.stem(word))
content_dict = {}
for word in words_filtered:
if word in content_dict:
content_dict[word] += 1
else:
content_dict[word] = 1
hotel_dict[hotel] = content_dict
# Create for each hotel a dict of comment content words and
# their frequencies, the counts of their occurrences.
# Add each of the hotel content word dicts to a dict with their
# hotel IDs as the keys.
print(hotel_dict.keys())
print(type(hotel_dict), ' - ', type(hotel_dict['100506']),
' - ', len(hotel_dict['100506']))
# Write this dict to a json file,
# and verify that it is written correctly.
with open('hotel.json', 'w') as hd:
json.dump(hotel_dict, hd)
with open('hotel.json') as json_data:
if check_json(json_data) == True:
print('It worked.')
# Report the number of unique content words
# in each of the hotel's dicts
num_words = {}
for hotel in hotel_dict:
num_words[hotel] = len(hotel_dict[hotel])
print(num_words)
| mit |
amueller/advanced_training | mglearn/plot_improper_preprocessing.py | 2 | 3016 | import matplotlib.pyplot as plt
def make_bracket(s, xy, textxy, width, ax):
annotation = ax.annotate(
s, xy, textxy, ha="center", va="center", size=20,
arrowprops=dict(arrowstyle="-[", fc="w", ec="k",
lw=2,), bbox=dict(boxstyle="square", fc="w"))
annotation.arrow_patch.get_arrowstyle().widthB = width
def plot_improper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 10))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9], left=[0, 12, 15],
color=['white', 'grey', 'grey'], hatch="//")
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 6)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(13.5, -.3, "validation fold",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(17.5, -.3, "test set",
fontdict={'fontsize': 14}, horizontalalignment="center")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
def plot_proper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 8))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9],
left=[0, 12, 15], color=['white', 'grey', 'grey'], hatch="//")
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 4.5)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds", fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(13.5, -.3, "validation fold", fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(17.5, -.3, "test set", fontdict={'fontsize': 14}, horizontalalignment="center")
make_bracket("scaler fit", (6, 1.3), (6, 2.), 12, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
fig.subplots_adjust(hspace=.3)
| bsd-2-clause |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/mu_hurricane.py | 1 | 2469 | from kapteyn import maputils
from matplotlib import pyplot as plt
from kapteyn import tabarray
import numpy
def plotcoast(fn, pxlim, pylim, col='k'):
coasts = tabarray.tabarray(fn, comchar='s') # Read two columns from file
for segment in coasts.segments:
coastseg = coasts[segment].T
xw = coastseg[1]; yw = coastseg[0] # First one appears to be Latitude
xs = xw; ys = yw # Reset lists which store valid pos.
if 1:
# Mask arrays if outside plot box
xp, yp = annim.projection.topixel((numpy.array(xs),numpy.array(ys)))
xp = numpy.ma.masked_where(numpy.isnan(xp) |
(xp > pxlim[1]) | (xp < pxlim[0]), xp)
yp = numpy.ma.masked_where(numpy.isnan(yp) |
(yp > pylim[1]) | (yp < pylim[0]), yp)
# Mask array could be of type numpy.bool_ instead of numpy.ndarray
if numpy.isscalar(xp.mask):
xp.mask = numpy.array(xp.mask, 'bool')
if numpy.isscalar(yp.mask):
yp.mask = numpy.array(yp.mask, 'bool')
# Count the number of positions in these list that are inside the box
j = 0
for i in range(len(xp)):
if not xp.mask[i] and not yp.mask[i]:
j += 1
if j > 200: # Threshold to prevent too much detail and big pdf's
frame.plot(xp.data, yp.data, color=col)
# Get a header and change some values
f = maputils.FITSimage("m101.fits")
header = f.hdr
header['CDELT1'] = 0.1
header['CDELT2'] = 0.1
header['CRVAL1'] = 285
header['CRVAL2'] = 20
# Use the changed header as external source for new object
f = maputils.FITSimage(externalheader=header, externaldata=f.dat)
fig = plt.figure()
frame = fig.add_subplot(1,1,1)
annim = f.Annotatedimage(frame, cmap="YlGn")
annim.Image()
grat = annim.Graticule()
grat.setp_ticklabel(wcsaxis=0, fmt="%g^{\circ}")
grat.setp_ticklabel(wcsaxis=1, fmt='Dms')
grat.setp_axislabel(plotaxis='bottom', label='West - East')
grat.setp_axislabel(plotaxis='left', label='South - North')
annim.plot()
annim.projection.allow_invalid = True
# Plot coastlines in black, borders in red
plotcoast('WDB/namer-cil.txt', annim.pxlim, annim.pylim, col='k')
plotcoast('WDB/namer-bdy.txt', annim.pxlim, annim.pylim, col='r')
plotcoast('WDB/samer-cil.txt', annim.pxlim, annim.pylim, col='k')
plotcoast('WDB/samer-bdy.txt', annim.pxlim, annim.pylim, col='r')
annim.interact_imagecolors()
plt.show()
| bsd-3-clause |
zorojean/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
    # Check that PCA and RandomizedPCA agree on the explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
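    # (Hedged reading of this test: X is a rank-one signal plus isotropic
    # noise, so the log-likelihood returned by _assess_dimension_ should peak
    # at, or very close to, a latent dimensionality of 1.)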
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
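    # (Hedged reading of this test: two different mean shifts are added to
    # disjoint subsets of rows, so _infer_dimension_ should report a
    # dimensionality greater than 1.)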
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
thilbern/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
jcesardasilva/agpy | agpy/asinh_norm.py | 6 | 3869 | """
------------------------------
Arcsinh Colorbar Normalization
------------------------------
For use with, e.g., imshow -
imshow(myimage, norm=AsinhNorm())
Some of the ideas used are from `aplpy <aplpy.github.com>`_
"""
from matplotlib.colors import Normalize
from matplotlib.cm import cbook
from numpy import ma
import numpy as np
class AsinhNorm(Normalize):
def __init__(self, vmin=None, vmax=None, clip=False, vmid=None):
self.vmid = vmid
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self,value, clip=None, midpoint=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
vmid = self.vmid if self.vmid is not None else (vmax+vmin)/2.0
if midpoint is None:
midpoint = (vmid - vmin) / (vmax - vmin)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
#result = (ma.arcsinh(val)-np.arcsinh(vmin))/(np.arcsinh(vmax)-np.arcsinh(vmin))
result = ma.arcsinh(result/midpoint) / ma.arcsinh(1./midpoint)
if vtype == 'scalar':
result = result[0]
return result
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
if self.vmid is None:
self.vmid = (self.vmax+self.vmin)/2.0
#return np.arcsinh(array/midpoint) / np.arcsinh(1./midpoint)
class SinhNorm(Normalize):
def __init__(self, vmin=None, vmax=None, clip=False, vmid=None):
self.vmid = vmid
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self,value, clip=None, midpoint=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
vmid = self.vmid if self.vmid is not None else (vmax+vmin)/2.0
if midpoint is None:
midpoint = (vmid - vmin) / (vmax - vmin)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
#result = (ma.arcsinh(val)-np.arcsinh(vmin))/(np.arcsinh(vmax)-np.arcsinh(vmin))
result = ma.sinh(result/midpoint) / ma.sinh(1./midpoint)
if vtype == 'scalar':
result = result[0]
return result
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
if self.vmid is None:
self.vmid = (self.vmax+self.vmin)/2.0
#return np.arcsinh(array/midpoint) / np.arcsinh(1./midpoint)
| mit |
ioos/system-test | Theme_1_Baseline/Scenario_1B_CoreVariable_Strings/Scenario_1B_CoreVariable_Strings.py | 2 | 7535 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from utilities import css_styles
css_styles()
# <markdowncell>
# # IOOS System Test - Theme 1 - Scenario B - [Description](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-1-baseline-assessment)
#
# ## Core Variable Strings
#
# This notebook looks at the IOOS core variables and uses the Marine Metadata Interoperability SPARQL endpoint to convert them to CF Standard Names. Each IOOS CSW server is then queried for the CF standard name that is associated with an IOOS Core Variable.
#
# ## Questions
# 1. Using a list of Core IOOS Variables and the MMI SPARQL service, can we search and quantify records from CSW endpoints that relate to core variables?
# <markdowncell>
# #### Get a list of the IOOS Core Variables from MMI
# <codecell>
# Using RDF
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
g = Graph()
g.load("http://mmisw.org/ont/ioos/core_variable")
core_var_uri = URIRef('http://mmisw.org/ont/ioos/core_variable/Core_Variable')
core_var_name_uri = URIRef('http://mmisw.org/ont/ioos/core_variable/name')
core_var_def_uri = URIRef('http://mmisw.org/ont/ioos/core_variable/definition')
core_variables = []
for cv in g.subjects(predicate=RDF.type, object=core_var_uri):
name = g.value(subject=cv, predicate=core_var_name_uri).value
definition = g.value(subject=cv, predicate=core_var_def_uri).value
core_variables.append((name, definition))
# <codecell>
import pandas as pd
core_variables_names = [x for x,y in core_variables]
pd.DataFrame.from_records(core_variables, columns=("Name", "Definition",))
# <markdowncell>
# <div class="error"><strong>Programmatic access to Core Variables</strong> - This isn't straightforward and should be abstracted into a library. See: https://github.com/ioos/system-test/issues/128</div>
# <markdowncell>
# #### Query MMI for CF standard names related to the IOOS Core Variables
# <codecell>
from SPARQLWrapper import SPARQLWrapper, JSON
sparql = SPARQLWrapper("http://mmisw.org/sparql")
query = """
PREFIX ioos: <http://mmisw.org/ont/ioos/parameter/>
SELECT DISTINCT ?cat ?parameter ?property ?value
WHERE {?parameter a ioos:Parameter .
?parameter ?property ?value .
?cat skos:narrowMatch ?parameter .
FILTER (regex(str(?property), "Match", "i") && regex(str(?value), "cf", "i") )
}
ORDER BY ?cat ?parameter
"""
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
j = sparql.query().convert()
cf_standard_uris = list(set([ x["value"]["value"] for x in j.get("results").get("bindings") ]))
cf_standard_names = map(lambda x: x.split("/")[-1], cf_standard_uris)
pd.DataFrame.from_records(zip(cf_standard_names, cf_standard_uris), columns=("CF Name", "CF URI",))
# <markdowncell>
# ### Searching CSW servers on variable names
# <codecell>
# https://github.com/ioos/system-test/wiki/Service-Registries-and-Data-Catalogs
known_csw_servers = ['http://data.nodc.noaa.gov/geoportal/csw',
'http://www.nodc.noaa.gov/geoportal/csw',
'http://www.ngdc.noaa.gov/geoportal/csw',
'http://cwic.csiss.gmu.edu/cwicv1/discovery',
'http://geoport.whoi.edu/geoportal/csw',
'https://edg.epa.gov/metadata/csw',
'http://cmgds.marine.usgs.gov/geonetwork/srv/en/csw',
'http://cida.usgs.gov/gdp/geonetwork/srv/en/csw',
'http://geodiscover.cgdi.ca/wes/serviceManagerCSW/csw',
'http://geoport.whoi.edu/gi-cat/services/cswiso',
'https://data.noaa.gov/csw',
]
# <markdowncell>
# #### Subset which variables we should query by
# <codecell>
# Query on sea surface height
variables_to_query = [ x for x in cf_standard_names if "sea_surface_height" in x ]
custom_variables = [u"sea_surface_height", u"sea_surface_elevation"]
variables_to_query += custom_variables
variables_to_query
# <markdowncell>
# <div class="error"><strong>Missing CF Standard Names</strong> - "sea_surface_height" and "sea_surface_elevation" are valid CF Aliases but are not returned by MMI when running the SPARQL query. We added them here manually. See: https://github.com/ioos/system-test/issues/129</div>
# <markdowncell>
# #### Construct CSW Filters
# <codecell>
from owslib import fes
cf_name_filters = []
for cf_name in variables_to_query:
text_filter = fes.PropertyIsLike(propertyname='apiso:AnyText', literal="*%s*" % cf_name, wildCard='*')
cf_name_filters.append(text_filter)
# <markdowncell>
# #### Query each CSW catalog for the cf_name_filters constructed above
# <codecell>
from owslib.csw import CatalogueServiceWeb
from utilities import normalize_service_urn
var_results = []
for x in range(len(cf_name_filters)):
var_name = variables_to_query[x]
single_var_filter = cf_name_filters[x]
for url in known_csw_servers:
try:
csw = CatalogueServiceWeb(url, timeout=20)
csw.getrecords2(constraints=[single_var_filter], maxrecords=1000, esn='full')
for record, item in csw.records.items():
for d in item.references:
result = dict(variable=var_name,
scheme=normalize_service_urn(d['scheme']),
url=d['url'],
server=url,
title=record.title())
var_results.append(result)
except BaseException, e:
print "- FAILED: %s - %s" % (url, e)
# <markdowncell>
# <div class="error"><strong>Paginating CSW Records</strong> - Some servers have a maximum number of records you can retrieve at once. See: https://github.com/ioos/system-test/issues/126</div>
# <markdowncell>
# #### Load results into a Pandas DataFrame
# <codecell>
%matplotlib inline
import pandas as pd
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 500)
from IPython.display import HTML
df = pd.DataFrame(var_results)
df = df.drop_duplicates()
# <markdowncell>
# #### Results by variable
# <codecell>
by_variable = pd.DataFrame(df.groupby("variable").size(), columns=("Number of services",))
by_variable.sort('Number of services', ascending=False).plot(kind="barh", figsize=(10,8,))
# <markdowncell>
# #### The number of service types for each variable
# <codecell>
import math
var_service_summary = pd.DataFrame(df.groupby(["variable", "scheme"], sort=True).size(), columns=("Number of services",))
#HTML(model_service_summary.to_html())
var_service_plotter = var_service_summary.unstack("variable")
var_service_plot = var_service_plotter.plot(kind='barh', subplots=True, figsize=(12,120), sharey=True)
# <markdowncell>
# #### Variables per CSW server
# <codecell>
variables_per_csw = pd.DataFrame(df.groupby(["variable", "server"]).size(), columns=("Number of services",))
#HTML(records_per_csw.to_html())
var_csw_plotter = variables_per_csw.unstack("variable")
var_csw_plot = var_csw_plotter.plot(kind='barh', subplots=True, figsize=(12,30,), sharey=True)
# <markdowncell>
# #### CSW servers per variable
# <codecell>
csws_per_variable = pd.DataFrame(df.groupby(["variable", "server"]).size(), columns=("Number of variables",))
#HTML(records_per_csw.to_html())
csw_var_plotter = csws_per_variable.unstack("server")
csw_var_plot = csw_var_plotter.plot(kind='barh', subplots=True, figsize=(12,30,), sharey=True)
# <codecell>
| unlicense |
mne-tools/mne-tools.github.io | 0.20/_downloads/9e3503f1d4e831b7b292f1805e5c540b/plot_evoked_topomap.py | 1 | 5147 | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <[email protected]>
# Tal Linzen <[email protected]>
# Denis A. Engeman <[email protected]>
# Mikołaj Magnuski <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
###############################################################################
# Basic `plot_topomap` options
# ----------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times`` allows to specify time instants (in seconds!) for which
# topographies will be shown. We select timepoints from 50 to 150 ms with a
# step of 20ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
###############################################################################
# If times is set to None at most 10 regularly spaced topographies will be
# shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
###############################################################################
# We can use ``nrows`` and ``ncols`` parameter to create multiline plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
###############################################################################
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
###############################################################################
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
###############################################################################
# Additional `plot_topomap` options
# ---------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
###############################################################################
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. By default ``extrapolate='box'`` is used which
# extrapolates to a large box stretching beyond the head circle.
# Compare this with ``extrapolate='head'`` (second topography below) where
# extrapolation goes to 0 at the head circle and ``extrapolate='local'`` where
# extrapolation is performed only within some distance from channels:
extrapolations = ['box', 'head', 'local']
fig, axes = plt.subplots(figsize=(7.5, 2.5), ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for ax, extr in zip(axes, extrapolations):
evoked.plot_topomap(0.1, ch_type='eeg', size=2, extrapolate=extr, axes=ax,
show=False, colorbar=False, sphere=(0., 0., 0., 0.09))
ax.set_title(extr, fontsize=14)
###############################################################################
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s', extrapolate='local', border='mean')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
###############################################################################
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation
# (animates only in matplotlib interactive mode)
evoked.animate_topomap(ch_type='mag', times=times, frame_rate=10,
time_unit='s')
| bsd-3-clause |
vikingMei/mxnet | example/rcnn/rcnn/pycocotools/coco.py | 41 | 19083 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
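# A minimal usage sketch of the class defined below (illustrative only; the
# annotation path is a placeholder, not shipped with this module):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   annIds = coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
#   anns = coco.loadAnns(annIds)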
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: RLE (run-length encoding of the segmentation)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
| apache-2.0 |
andrewnc/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
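    # Example: a constant feature yields a scale of 0 (zero std or zero
    # range); replacing it with 1.0 means dividing by that scale leaves the
    # feature unchanged instead of producing NaNs or infs.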
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
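    For example, with the default ``feature_range=(0, 1)`` a feature whose
    training values are ``[1, 2, 3]`` is mapped to ``[0.0, 0.5, 1.0]``.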
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
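    For example, a feature whose training values are ``[1., 2., 3.]`` (mean 2,
    standard deviation ``sqrt(2/3)``) is transformed to approximately
    ``[-1.22, 0., 1.22]``.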
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
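# A minimal usage sketch of StandardScaler (made-up data; illustrative only):
# fit stores per-feature mean_/std_, transform applies them, inverse_transform
# undoes them.
#
#     X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
#     scaler = StandardScaler().fit(X)
#     Xs = scaler.transform(X)              # each column: zero mean, unit variance
#     X_back = scaler.inverse_transform(Xs) # recovers X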
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
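# A minimal usage sketch of MaxAbsScaler (made-up data; illustrative only).
# Only a division by the per-column maximum absolute value is applied, so zero
# entries stay zero and sparsity is preserved.
#
#     X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
#     scaler = MaxAbsScaler().fit(X)
#     scaler.scale_          # array([4., 4.]) -- per-column max absolute value
#     scaler.transform(X)    # every column now lies within [-1, 1]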
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
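# A minimal usage sketch of RobustScaler (made-up data with one outlier;
# illustrative only): the median/IQR statistics are barely moved by the 100.
#
#     X = np.array([[1., -2.], [2., 0.], [3., 2.], [100., 4.]])
#     scaler = RobustScaler().fit(X)
#     scaler.center_         # per-column median
#     scaler.scale_          # per-column interquartile range
#     scaler.transform(X)    # the bulk of the data is mapped to roughly [-1, 1]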
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
    powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
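# A short sketch of how the generated columns can be inspected (made-up data;
# illustrative only): with 2 inputs and degree=2 the 6 outputs are
# [1, a, b, a^2, a*b, b^2], and powers_ lists the exponents row by row.
#
#     X = np.arange(6).reshape(3, 2)
#     poly = PolynomialFeatures(degree=2).fit(X)
#     poly.n_output_features_   # 6
#     poly.powers_              # shape (6, 2): [[0,0],[1,0],[0,1],[2,0],[1,1],[0,2]]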
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
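# A minimal sketch of the three supported norms (made-up data; illustrative
# only):
#
#     X = np.array([[3., 4.], [1., 0.]])
#     normalize(X, norm='l2')    # rows rescaled to unit length: [[0.6, 0.8], [1., 0.]]
#     normalize(X, norm='l1')    # rows rescaled so absolute values sum to 1
#     normalize(X, norm='max')   # rows divided by their per-row maximum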
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
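# A minimal sketch of binarize (made-up data; illustrative only): values
# strictly above the threshold become 1, everything else becomes 0.
#
#     X = np.array([[0.5, -1.2, 2.0, 0.0]])
#     binarize(X, threshold=0.0)     # -> array([[1., 0., 1., 0.]])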
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
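# A short sketch of the equivalence stated in the docstring (made-up data;
# illustrative only): centering the kernel matrix is the same as centering an
# explicit feature map phi and recomputing the kernel.
#
#     rng = np.random.RandomState(0)
#     phi = rng.rand(5, 3)
#     K = np.dot(phi, phi.T)
#     Kc = KernelCenterer().fit(K).transform(K)
#     phi_c = phi - phi.mean(axis=0)
#     # np.allclose(Kc, np.dot(phi_c, phi_c.T)) is True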
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
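# A tiny sketch of the selection logic above (hypothetical transform;
# illustrative only): only the selected columns are transformed, the remaining
# columns are stacked, untransformed, to the right of the result.
#
#     X = np.array([[0., 10.], [1., 20.]])
#     _transform_selected(X, lambda Z: 2 * Z, selected=[0])
#     # -> array([[ 0., 10.], [ 2., 20.]])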
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
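# A short sketch of the handle_unknown behaviour implemented above (made-up
# data; illustrative only): with handle_unknown='ignore', a category value not
# seen during fit is simply dropped instead of raising an error.
#
#     enc = OneHotEncoder(handle_unknown='ignore')
#     enc.fit([[0, 1], [1, 2]])
#     enc.transform([[1, 3]]).toarray()   # the unseen value 3 contributes no column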
| bsd-3-clause |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/bayes_opt.py | 1 | 3306 | """ A Bayesian Optimization strategy for parameter search """
from __future__ import print_function
from collections import OrderedDict
try:
from bayes_opt import BayesianOptimization
bayes_opt_present=True
except Exception:
BayesianOptimization = None
bayes_opt_present=False
from kernel_tuner.strategies import minimize
supported_methods = ["poi", "ei", "ucb"]
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
    :returns: A list of dictionaries for executed kernel configurations and their
        execution times, and a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
#Bayesian Optimization strategy seems to need some hyper parameter tuning to
#become better than random sampling for auto-tuning GPU kernels.
#alpha, normalize_y, and n_restarts_optimizer are options to
#https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html
    #defaults used by Bayesian Optimization are:
# alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces
# n_restarts_optimizer=5,
# normalize_y=True,
#several exploration friendly settings are: (default is acq="ucb", kappa=2.576)
# acq="poi", xi=1e-1
# acq="ei", xi=1e-1
# acq="ucb", kappa=10
if not bayes_opt_present:
raise ImportError("Error: optional dependency Bayesian Optimization not installed")
#defaults as used by Bayesian Optimization Python package
acq = tuning_options.strategy_options.get("method", "poi")
kappa = tuning_options.strategy_options.get("kappa", 2.576)
xi = tuning_options.strategy_options.get("xi", 0.0)
init_points = tuning_options.strategy_options.get("popsize", 5)
n_iter = tuning_options.strategy_options.get("maxiter", 25)
tuning_options["scaling"] = True
results = []
#function to pass to the optimizer
def func(**kwargs):
args = [kwargs[key] for key in tuning_options.tune_params.keys()]
return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)
bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)
pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))
verbose=0
if tuning_options.verbose:
verbose=2
optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)
optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)
if tuning_options.verbose:
print(optimizer.max)
return results, runner.dev.get_environment()
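#Sketch of how this strategy is typically selected from the kernel_tuner API
#(a sketch only: kernel_string, size, args and tune_params are placeholders
#that depend on your own kernel and problem setup):
#
#    import kernel_tuner
#    results, env = kernel_tuner.tune_kernel(
#        "vector_add", kernel_string, size, args, tune_params,
#        strategy="bayes_opt",
#        strategy_options={"method": "ucb", "kappa": 10,
#                          "popsize": 10, "maxiter": 50})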
| apache-2.0 |
waltermateriais/xutils | xpython/xplot/__init__.py | 1 | 7597 | #!/usr/bin/python
# __init__.py
# by Walter Dal'Maz Silva
# on 30th September 2016
__version__ = '0.1.0'
DEBUG_MODULE = False
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import subprocess
import matplotlib.pyplot as plt
from PIL import Image
from xutils.__core__ import _print_head, _run, _get_kwarg
# ----------------------------------------------------------------------------
# Compile gnuplot figures
# ----------------------------------------------------------------------------
def _compile_gnuplot(sources, **kwargs):
""" Compiles gnuplot figures.
    sources : directories in which to look for the plotting script (scriptf)
kwargs :
- scriptf : name of plotting scrips, default 'plot.gp'
- compfig : force compile or re-compile, default 'False'
- shell : terminal used, default 'bash'
- figname : compiled figure name, default 'figure.pdf'
"""
if DEBUG_MODULE: _print_head(_compile_gnuplot.__name__, kwargs)
scriptf = _get_kwarg('scriptf', kwargs, 'plot.gp')
compfig = _get_kwarg('compfig', kwargs, False)
shell = _get_kwarg('shell', kwargs, 'bash')
figname = _get_kwarg('figname', kwargs, 'figure.pdf')
if type(sources) == str: sources = [sources]
for src in sources:
print('\nCompiling', src)
if not os.path.isfile(os.path.join(src, figname)) or compfig:
_run([shell, scriptf], 'compile.log', cwd=src, operation='compile')
else:
print('no compiling needed...')
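# A short usage sketch (hypothetical directory layout; assumes gnuplot is
# installed): compile every 'plot.gp' found in the given source directories,
# writing 'figure.pdf' next to each script.
#
#     _compile_gnuplot(['fig/top', 'fig/bottom'], compfig=True)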
# ----------------------------------------------------------------------------
# Convert and generate final plot
# Relevant snippets from:
# - stackoverflow.com/questions/9295026
# - matplotlib.org/examples/pylab_examples/subplots_demo.html
# - stackoverflow.com/questions/6541123/
# - stackoverflow.com/questions/39578141
# ----------------------------------------------------------------------------
def _plot_stacked(sources, **kwargs):
""" Assembly stacked images from sources.
Assumes 'convert' is installed and is callable in the OS.
sources : directories to look for figures
kwargs :
- convert : force re-converting figure to png, default False
- density : pixel density argument to convert, default '600'
- rzwidth : resize width in pixel for final figure, default '600'
- figsize : dimensions in inches for final figure, default (3,3)
- extname : source figures default extension, default 'png'
- backgrd : background color in final figure*(1), default '#FFFFFF'
- figname : name to look for in sources, default 'figure.pdf'
- outname : name of compiled figure, default 'figure.png'
    - pltshow : show interactive plot allowing manual setup, default False
(1) if figures are transparent *png*
"""
    if DEBUG_MODULE: _print_head(_plot_stacked.__name__, kwargs)
convert = _get_kwarg('convert', kwargs, False)
density = _get_kwarg('density', kwargs, '600')
rzwidth = _get_kwarg('rzwidth', kwargs, '600')
figsize = _get_kwarg('figsize', kwargs, (3,3))
extname = _get_kwarg('extname', kwargs, 'png')
backgrd = _get_kwarg('backgrd', kwargs, '#FFFFFF')
figname = _get_kwarg('figname', kwargs, 'figure.pdf')
outname = _get_kwarg('outname', kwargs, 'figure.png')
pltshow = _get_kwarg('pltshow', kwargs, False)
sheight = len(sources)
ofigure = '.'.join([figname.split('.')[0], extname])
destins = [os.path.join(src, ofigure) for src in sources]
sources = [os.path.join(src, figname) for src in sources]
command = ['convert','-density', density, '-transparent', backgrd]
fig, ax = plt.subplots(sheight, 1, figsize=figsize)
if sheight == 1: ax = [ax] #TODO fix this workaround!
for i,(src,dst) in enumerate(zip(sources, destins)):
print('\nProcessing', src)
if not os.path.isfile(dst) or convert:
fresize = ['convert', dst, '-resize', rzwidth, dst]
if os.path.isfile(src) and not convert:
_run(command+[src, dst], 'compile.log', operation='convert')
else:
print('Trying to compile %s' % src)
# Pass compfig with convert value to be coherent
_compile_gnuplot(os.path.dirname(src), compfig=convert)
_run(command+[src, dst], 'compile.log', operation='convert')
_run(fresize, 'compile.log', operation='resize')
img = Image.open(dst)
#if sheight > 1:
ax[i].imshow(img)
ax[i].axis('off')
adj = {'left': 0.0, 'bottom': 0.0, 'right': 1.0,
'top': 1.0, 'wspace': 1.0, 'hspace': 0.0}
plt.setp([a.get_xticklabels() for a in ax], visible=False)
plt.setp([a.get_yticklabels() for a in ax], visible=False)
plt.subplots_adjust(**adj)
plt.savefig(outname, dpi=600)
print('Generated', outname)
if pltshow: plt.show()
# ----------------------------------------------------------------------------
# plot_mgr: manage files in compilation of figures
# TODO allow single project parsing through dictionary
# Relevant snippets and solutions from:
# - stackoverflow.com/questions/9867562/
# - stackoverflow.com/questions/6591931
# ----------------------------------------------------------------------------
class plot_mgr:
def __init__(self):
self._count = 0
self._cache = {}
self._generated = []
def add_plot_stacked(self, sources, **kwargs):
        if DEBUG_MODULE: _print_head(self.add_plot_stacked.__name__, kwargs)
self._count += 1 # increment before for counting correctly
self._cache[str(self._count)] = [sources, kwargs]
def execute_stacked(self):
for key, val in self._cache.items():
#TODO this is not ordered, I used a dict initially
print('Dealing with %s of %d' % (key, self._count))
self._generated.append(_plot_stacked(val[0], **val[1]))
def clean_intermediates(self, listonly=False):
intermediates = []
print('\nCleaning intermediate files')
for key, val in self._cache.items():
print('Buffering: %s of %d' % (key, self._count))
for src in val[0]:
figname = _get_kwarg('figname', val[1], 'figure.pdf')
extname = _get_kwarg('extname', val[1], 'png')
ofigure = '.'.join([figname.split('.')[0], extname])
intermediates.append(os.path.join(src, figname))
intermediates.append(os.path.join(src, ofigure))
if listonly:
print('\nFiles to remove:')
print('\n'.join([inter for inter in intermediates]))
return
size = 0
print('\nRemoving... (this might take a while)')
for inter in intermediates:
if os.path.isfile(inter):
size += os.path.getsize(inter)
print('-->', inter)
os.remove(inter)
else:
print('Cannot remove', inter)
        print('\nCleaned a total of %.2f MB' % (size/(1024.0*1024.0)))
def clean_all(self):
print('sleep before implementing')
self.clean_intermediates() #maybe make a common private for this
#don't forget about logfiles
#self._generated has the list of generated (final) files
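# ----------------------------------------------------------------------------
# Usage sketch (hypothetical paths; assumes gnuplot and ImageMagick's
# `convert` are available on the system):
#
#     mgr = plot_mgr()
#     mgr.add_plot_stacked(['fig/top', 'fig/bottom'], outname='stacked.png')
#     mgr.execute_stacked()
#     mgr.clean_intermediates(listonly=True)   # dry run: only lists the files
# ----------------------------------------------------------------------------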
# ----------------------------------------------------------------------------
# EOF
# ----------------------------------------------------------------------------
| unlicense |
savitasavadi/ml_lab_ecsc_306 | labwork/lab2/sci-learn/logistic_regression.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| apache-2.0 |
kagayakidan/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
bblais/Classy | classy/unsupervised.py | 1 | 6946 | from sklearn.utils.extmath import safe_sparse_dot
from . import autoencoder
from sklearn.utils import check_X_y,check_array
from .autoencoder import Autoencoder
from sklearn.decomposition import PCA as RandomizedPCA
#from .multilayer_perceptron import MultilayerPerceptronAutoencoder
from numpy import array
import numpy as np
from .datasets import Struct
from copy import deepcopy
class GenericFilter(object):
def fit_transform_data(self,data):
new_data=Struct()
new_data.DESCR="Transformed"
for key in data:
if key=='vectors' or key=='feature_names':
continue
new_data[key]=deepcopy(data[key])
new_data.vectors=self.fit_transform(data.vectors)
new_data.feature_names=['F%d' % (f+1) for f in range(new_data.vectors.shape[1])]
return new_data
def transform_data(self,data):
new_data=Struct()
new_data.DESCR="Transformed"
for key in data:
if key=='vectors' or key=='feature_names':
continue
new_data[key]=deepcopy(data[key])
new_data.vectors=self.transform(data.vectors)
new_data.feature_names=['F%d' % (f+1) for f in range(new_data.vectors.shape[1])]
return new_data
class AutoEncoder(Autoencoder,GenericFilter):
    def __init__(self,*args,**kwargs):
        # delegate to the Autoencoder base class; the old
        # MultilayerPerceptronAutoencoder import above is commented out
        Autoencoder.__init__(self,*args,**kwargs)
        self.equivalent={}
    def fit(self,*args,**kwargs):
        Autoencoder.fit(self,*args,**kwargs)
        for name in self.equivalent:
            super(Autoencoder,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
    def fit_transform(self,*args,**kwargs):
        result=Autoencoder.fit_transform(self,*args,**kwargs)
        for name in self.equivalent:
            super(Autoencoder,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
        return result
def output(self, X):
"""Fit the model to the data X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples)
Predicted target values per element in X.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = [X.shape[1]] + hidden_layer_sizes + \
[self.n_outputs_]
# Initialize layers
activations = []
activations.append(X)
for i in range(self.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
# forward propagate
self._forward_pass(activations, with_output_activation=False)
        return activations[1:]
def plot(self,only=None):
from pylab import plot,subplot,sqrt,ceil,title
weights=self.weights_xh.T
if only is None:
only=list(range(len(weights)))
L=len(only)
c=ceil(sqrt(L))
r=ceil(L/c)
for i,idx in enumerate(only):
w=weights[idx]
subplot(r,c,i+1)
plot(w,'-o')
title('Filter %d' % (idx))
def imshow(self,shape,only=None):
from pylab import subplot,imshow,cm,title,sqrt,ceil
weights=self.weights_xh.T
if only is None:
only=list(range(len(weights)))
L=len(only)
c=ceil(sqrt(L))
r=ceil(L/c)
for i,idx in enumerate(only):
w=weights[idx]
w=w.reshape(shape)
subplot(r,c,i+1)
imshow(w,cmap=cm.gray,interpolation='nearest')
title('Filter %d' % (idx))
from sklearn.mixture import GaussianMixture as GMM
class GMM1D(GenericFilter):
def __init__(self,number_of_gaussians):
self.number_of_gaussians=number_of_gaussians
if isinstance(number_of_gaussians,int):
self.number_of_gaussians=[number_of_gaussians]
def fit(self,X):
pass
    def transform(self,X):
        newX=[]
        models=[GMM(M) for M in self.number_of_gaussians]
        for v in X:
            vec=[]
            # GaussianMixture expects a 2D array: one column of values per sample
            values=array(v,dtype=float).reshape(-1,1)
            for model in models:
                model.fit(values)
                means=model.means_.ravel()
                # GaussianMixture stores variances in covariances_; take the
                # square root to report standard deviations
                stddevs=model.covariances_.ravel()**0.5
                for m,s in zip(means,stddevs):
                    vec.append(m)
                    vec.append(s)
            newX.append(vec)
        newX=array(newX)
        return newX
def fit_transform(self,X):
self.fit(X)
return self.transform(X)
class PCA(RandomizedPCA,GenericFilter):
def __init__(self,*args,**kwargs):
RandomizedPCA.__init__(self,*args,**kwargs)
self.equivalent={'weights':'components_',
'components':'components_',
}
def fit(self,*args,**kwargs):
RandomizedPCA.fit(self,*args,**kwargs)
for name in self.equivalent:
super(RandomizedPCA,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def fit_transform(self,*args,**kwargs):
result=RandomizedPCA.fit_transform(self,*args,**kwargs)
for name in self.equivalent:
super(RandomizedPCA,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
return result
def plot(self,only=None):
from pylab import plot,subplot,sqrt,ceil,title
if only is None:
only=list(range(len(self.weights)))
L=len(only)
c=ceil(sqrt(L))
r=ceil(L/c)
for i,idx in enumerate(only):
w=self.weights[idx]
subplot(r,c,i+1)
plot(w,'-o')
title('PC %d' % (idx))
def imshow(self,shape,only=None):
from pylab import subplot,imshow,cm,title,sqrt,ceil
if only is None:
only=list(range(len(self.weights)))
L=len(only)
c=ceil(sqrt(L))
r=ceil(L/c)
for i,idx in enumerate(only):
w=self.weights[idx]
w=w.reshape(shape)
subplot(r,c,i+1)
imshow(w,cmap=cm.gray,interpolation='nearest')
title('PC %d' % (idx))
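# A minimal usage sketch of the GenericFilter mixin defined above (``train_data``
# and ``test_data`` are hypothetical classy Struct objects with ``vectors`` and
# ``feature_names`` fields):
#
#     pca = PCA(n_components=5)
#     train_t = pca.fit_transform_data(train_data)
#     test_t = pca.transform_data(test_data)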
| mit |
ppwwyyxx/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
sinhrks/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # count the failed attempt before randomizing and retrying
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
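# A minimal usage sketch for ``discretize`` (this mirrors what
# ``spectral_clustering`` below does when ``assign_labels='discretize'``);
# ``affinity`` is assumed to be a precomputed symmetric affinity matrix:
#
#     maps = spectral_embedding(affinity, n_components=n_clusters,
#                               drop_first=False)
#     labels = discretize(maps, random_state=0)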
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
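# A minimal usage sketch for the function above (essentially what
# ``SpectralClustering.fit`` below does for ``affinity='rbf'``); ``X`` is
# assumed to be an (n_samples, n_features) data array:
#
#     affinity = pairwise_kernels(X, metric='rbf', gamma=1.0)
#     labels = spectral_clustering(affinity, n_clusters=8, random_state=0)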
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/discrete/tests/test_discrete.py | 8 | 55883 | """
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
import statsmodels.formula.api as smf
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
def pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.score_obs(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
# fmin_cg fails to converge on some machines - reparameterize
from statsmodels.tools.transform_model import StandardizeTransform
transf = StandardizeTransform(data.exog)
exog_st = transf(data.exog)
res1_st = Probit(data.endog,
exog_st).fit(method="cg", disp=0, maxiter=1000,
gtol=1e-08)
start_params = transf.transform_params(res1_st.params)
assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)
cls.res1 = Probit(data.endog,
data.exog).fit(start_params=start_params,
method="cg", maxiter=1000,
gtol=1e-05, disp=0)
assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8,
warn_convergence=False)
# converges close enough but warnflag is 2 for precision loss
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
@classmethod
def setupClass(self):
self.data = sm.datasets.spector.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        #Compares results from cvxopt to the standard slsqp
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between l1 and unregularized by setting alpha such
that certain parameters should be effectively unregularized, and others
should be ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
# The last entry should be close to zero
# handle extra parameter of NegativeBinomial
kvars = self.res_reg.model.exog.shape[1]
assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params()[:m, :m],
self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
t_reg = self.res_reg.t_test(np.eye(kvars + extra))
assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
rand_exog = sm.add_constant(rand_exog_st, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
alpha[:cls.m] = 0
alpha[-1] = 0 # don't penalize alpha
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
cls.k_extra = 1 # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
loglike_method='geometric')
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
loglike_method='geometric')
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15, method='bfgs', maxiter=1000)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15,
method='bfgs',
maxiter=1000)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
def test_predict_prob(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
probs_res = np.loadtxt(os.path.join(cur_dir, "results",
"predict_prob_poisson.csv"), delimiter=",")
# just check the first 100 obs. vs R to save memory
probs = self.res1.predict_prob()[:100]
assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0,
maxiter=1000)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
    Cannot find another implementation of the geometric model to cross-check results
    against, so we only test fitted values: the geometric model has fewer parameters
    than nb1 and nb2, and we want to make sure that predict(), i.e. np.dot(exog, params),
    works.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
# the following are regression tests, could be inherited instead
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
        # the columns of the pred_table should sum to the counts of each predicted class
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
mymodel = MNLogit(data.endog, exog)
cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
#m=12, pgtol=1e-7, factr=1e3, # 5 failures
#m=20, pgtol=1e-8, factr=1e2, # 3 failures
#m=30, pgtol=1e-9, factr=1e1, # 1 failure
m=40, pgtol=1e-10, factr=5e0,
loglike_and_score=mymodel.loglike_and_score)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)
    # turn off raising of PerfectSeparationError
mod.raise_on_perfect_prediction = False
# this will raise if you set maxiter high enough with a singular matrix
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
mod.fit(disp=False, maxiter=50) # should not raise but does warn
def test_poisson_predict():
#GH: 175, make sure poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    # extra options: exposure scales predictions multiplicatively, offset enters inside exp()
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
def test_poisson_newton():
#GH: 24, Newton doesn't work well sometimes
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_(smry == test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
    X[:,1] = np.random.randint(-10, 10, size=50) # integer-valued, but includes negatives, so not counts
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
    X[:,1] = np.random.randint(-10, 10, size=50) # integer-valued, but not restricted to {0, 1}, so not dummies
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
def test_non_binary():
y = [1, 2, 1, 2, 1, 2]
X = np.random.randn(6, 2)
np.testing.assert_raises(ValueError, Logit, y, X)
def test_mnlogit_factor():
dta = sm.datasets.anes96.load_pandas()
dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))
dta.exog['constant'] = 1
mod = sm.MNLogit(dta.endog, dta.exog)
res = mod.fit(disp=0)
# smoke tests
params = res.params
summary = res.summary()
# with patsy
del dta.exog['constant']
mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
res2 = mod.fit(disp=0)
res2.params
summary = res2.summary()
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
# should work
mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])
assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
# make sure this raises
exposure = pd.Series(np.random.randn(5))
assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
exposure=exposure)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
| bsd-3-clause |
GenericMappingTools/gmt-python | pygmt/tests/test_surface.py | 1 | 3649 | """
Tests for surface.
"""
import os
import pytest
import xarray as xr
from pygmt import surface, which
from pygmt.datasets import load_sample_bathymetry
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import data_kind
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEMP_GRID = os.path.join(TEST_DATA_DIR, "tmp_grid.nc")
@pytest.fixture(scope="module", name="ship_data")
def fixture_ship_data():
"""
Load the data from the sample bathymetry dataset.
"""
return load_sample_bathymetry()
def test_surface_input_file():
"""
Run surface by passing in a filename.
"""
fname = which("@tut_ship.xyz", download="c")
output = surface(data=fname, spacing="5m", region=[245, 255, 20, 30])
assert isinstance(output, xr.DataArray)
assert output.gmt.registration == 0 # Gridline registration
assert output.gmt.gtype == 0 # Cartesian type
return output
def test_surface_input_data_array(ship_data):
"""
Run surface by passing in a numpy array into data.
"""
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
output = surface(data=data, spacing="5m", region=[245, 255, 20, 30])
assert isinstance(output, xr.DataArray)
return output
def test_surface_input_xyz(ship_data):
"""
Run surface by passing in x, y, z numpy.ndarrays individually.
"""
output = surface(
x=ship_data.longitude,
y=ship_data.latitude,
z=ship_data.bathymetry,
spacing="5m",
region=[245, 255, 20, 30],
)
assert isinstance(output, xr.DataArray)
return output
def test_surface_input_xy_no_z(ship_data):
"""
Run surface by passing in x and y, but no z.
"""
with pytest.raises(GMTInvalidInput):
surface(
x=ship_data.longitude,
y=ship_data.latitude,
spacing="5m",
region=[245, 255, 20, 30],
)
def test_surface_wrong_kind_of_input(ship_data):
"""
Run surface using grid input that is not file/matrix/vectors.
"""
data = ship_data.bathymetry.to_xarray() # convert pandas.Series to xarray.DataArray
assert data_kind(data) == "grid"
with pytest.raises(GMTInvalidInput):
surface(data=data, spacing="5m", region=[245, 255, 20, 30])
def test_surface_with_outfile_param(ship_data):
"""
Run surface with the -Goutputfile.nc parameter.
"""
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
output = surface(
data=data, spacing="5m", region=[245, 255, 20, 30], outfile=TEMP_GRID
)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=TEMP_GRID) # check that outfile exists at path
with xr.open_dataarray(TEMP_GRID) as grid:
assert isinstance(grid, xr.DataArray) # ensure netcdf grid loads ok
finally:
os.remove(path=TEMP_GRID)
return output
def test_surface_short_aliases(ship_data):
"""
Run surface using short aliases -I for spacing, -R for region, -G for
outfile.
"""
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
output = surface(data=data, I="5m", R=[245, 255, 20, 30], G=TEMP_GRID)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=TEMP_GRID) # check that outfile exists at path
with xr.open_dataarray(TEMP_GRID) as grid:
assert isinstance(grid, xr.DataArray) # ensure netcdf grid loads ok
finally:
os.remove(path=TEMP_GRID)
return output
| bsd-3-clause |
mas-dse-greina/neon | luna16/old_code/LUNA16_extract_patches.py | 1 | 9294 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This loads the LUNA16 mhd files (3D images), extracts the transverse patches (64x64)
around the candidate positions, and then saves those patches to a subdirectory.
In another script we'll take those patches and run them through a modified
VGG model to see if we can correctly classify nodule (class 1) from
non-nodule (class 0).
"""
import SimpleITK as sitk
import numpy as np
import pandas as pd
import os
import ntpath
from neon.util.argparser import NeonArgparser
import logging
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("--subset", default='subset0',
help='LUNA16 subset directory to process')
args = parser.parse_args()
# To get the original LUNA16 MHD data:
# wget https://www.dropbox.com/sh/mtip9dx6zt9nb3z/AAAs2wbJxbNM44-uafZyoMVca/subset5.zip
# The files are 7-zipped. Regular linux unzip won't work to uncompress them. Use 7za instead.
# 7za e subset5.zip
DATA_DIR = '/mnt/data/medical/luna16/'
SUBSET = args.subset
cand_path = 'CSVFILES/candidates_with_annotations.csv' # Candidates file tells us the centers of the ROI for candidate nodules
# Set up logging
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler('all_'+SUBSET+'.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def extractCandidates(img_file):
# Get the name of the file
subjectName = ntpath.splitext(ntpath.basename(img_file))[0] # Strip off the .mhd extension
# Read the list of candidate ROI
dfCandidates = pd.read_csv(DATA_DIR+cand_path)
numCandidates = dfCandidates[dfCandidates['seriesuid']==subjectName].shape[0]
print('Subject {}: There are {} candidate nodules in this file.'.format(subjectName, numCandidates))
numNonNodules = sum(dfCandidates[dfCandidates['seriesuid']==subjectName]['class'] == 0)
numNodules = sum(dfCandidates[dfCandidates['seriesuid']==subjectName]['class'] == 1)
print('{} are true nodules (class 1) and {} are non-nodules (class 0)'.format(numNodules, numNonNodules))
# Read if the candidate ROI is a nodule (1) or non-nodule (0)
candidateValues = dfCandidates[dfCandidates['seriesuid']==subjectName]['class'].values
# Get the world coordinates (mm) of the candidate ROI center
worldCoords = dfCandidates[dfCandidates['seriesuid']==subjectName][['coordX', 'coordY', 'coordZ']].values
# Use SimpleITK to read the mhd image
itkimage = sitk.ReadImage(img_file)
# Normalize the image to be 1.0 x 1.0 x 1.0 mm voxel size
itkimage = normalize_img(itkimage)
# Get the real world origin (mm) for this image
originMatrix = np.tile(itkimage.GetOrigin(), (numCandidates,1)) # Real world origin for this image (0,0)
# Subtract the real world origin and scale by the real world (mm per pixel)
# This should give us the X,Y,Z coordinates for the candidates
candidatesPixels = (np.round(np.absolute(worldCoords - originMatrix) / itkimage.GetSpacing())).astype(int)
candidatePatches = []
imgAll = sitk.GetArrayFromImage(itkimage) # Read the image volume
valueArray = []
for candNum in range(numCandidates):
#print('Extracting candidate patch #{}'.format(candNum))
candidateVoxel = candidatesPixels[candNum,:]
xpos = int(candidateVoxel[0])
ypos = int(candidateVoxel[1])
zpos = int(candidateVoxel[2])
# Need to handle the candidates where the window would extend beyond the image boundaries
windowSize = 64 # Center a 64 pixel by 64 pixel patch around the candidate position
x_lower = np.max([0, xpos - windowSize//2]) # Return 0 if position off image
x_upper = np.min([xpos + windowSize//2, itkimage.GetWidth()]) # Return maxWidth if position off image
y_lower = np.max([0, ypos - windowSize//2]) # Return 0 if position off image
y_upper = np.min([ypos + windowSize//2, itkimage.GetHeight()]) # Return maxHeight if position off image
z_lower = np.max([0, zpos - windowSize//2]) # Return 0 if position off image
        z_upper = np.min([zpos + windowSize//2, itkimage.GetDepth()]) # Return maxDepth if position off image
skipPatch = False
if ((xpos - windowSize//2) < 0) | ((xpos + windowSize//2) > itkimage.GetWidth()):
logger.info('img file {} off x for candidate {}, label {}'.format(img_file, candNum, candidateValues[candNum]))
skipPatch = True
if ((ypos - windowSize//2) < 0) | ((ypos + windowSize//2) > itkimage.GetHeight()):
logger.info('img file {} off y for candidate {}, label {}'.format(img_file, candNum, candidateValues[candNum]))
skipPatch = True
# SimpleITK is x,y,z. Numpy is z, y, x.
imgPatch = imgAll[zpos, y_lower:y_upper, x_lower:x_upper]
#imgPatch = imgAll[zpos, :, :]
# Normalize to the Hounsfield units
imgPatchNorm = normalizePlanes(imgPatch)
if not skipPatch:
candidatePatches.append(imgPatchNorm) # Append the candidate image patches to a python list
valueArray.append(candidateValues[candNum])
return candidatePatches, valueArray
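# Rough sanity check of the world-to-voxel conversion used above (numbers invented for
# illustration, not taken from the LUNA16 data): a candidate at world x = -120.0 mm in an
# image whose origin is -200.0 mm with 1.0 mm spacing (after normalize_img) lands at
# voxel round(abs(-120.0 - (-200.0)) / 1.0) = 80.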
"""
Normalize pixel depth into Hounsfield units (HU)
This tries to get all pixels between -1000 and 400 HU.
All other HU will be masked.
Then we normalize pixel values between 0 and 1.
"""
def normalizePlanes(npzarray):
maxHU = 400.
minHU = -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray>1] = 1.
npzarray[npzarray<0] = 0.
return npzarray
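# Illustrative check of the normalization above (plain arithmetic, not taken from the data):
# -1000 HU maps to 0.0, 400 HU maps to 1.0, and 0 HU maps to
# (0 - (-1000)) / (400 - (-1000)) = 1000 / 1400 ~= 0.714; values outside the range are clipped.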
def normalize_img(img):
'''
Sets the MHD image to be approximately 1.0 mm voxel size
https://itk.org/ITKExamples/src/Filtering/ImageGrid/ResampleAnImage/Documentation.html
'''
new_x_size = int(img.GetSpacing()[0]*img.GetWidth()) # Number of pixels you want for x dimension
new_y_size = int(img.GetSpacing()[1]*img.GetHeight()) # Number of pixels you want for y dimension
new_z_size = int(img.GetSpacing()[2]*img.GetDepth()) # Number of pixels you want for z dimesion
new_size = [new_x_size, new_y_size, new_z_size]
# new_spacing = [old_sz*old_spc/new_sz for old_sz, old_spc, new_sz in zip(img.GetSize(), img.GetSpacing(), new_size)]
new_spacing = [1,1,1] # New spacing to be 1.0 x 1.0 x 1.0 mm voxel size
interpolator_type = sitk.sitkLinear
return sitk.Resample(img, new_size, sitk.Transform(), interpolator_type, img.GetOrigin(), new_spacing, img.GetDirection(), 0.0, img.GetPixelIDValue())
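# For example (made-up geometry, for illustration only): a 512 x 512 x 140 volume with
# 0.7 x 0.7 x 2.5 mm spacing would be resampled to int(0.7*512) x int(0.7*512) x int(2.5*140)
# = 358 x 358 x 350 voxels, each approximately 1.0 mm on a side.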
from scipy.misc import toimage
"""
Save the image patches for a given data file
"""
# We need to save the array as an image.
# This is the easiest way. Matplotlib seems to like adding a white border that is hard to kill.
def SavePatches(manifestFilename, img_file, patchesArray, valuesArray):
saveDir = ntpath.dirname(img_file) + '/patches_ALL'
try:
os.stat(saveDir)
except:
os.mkdir(saveDir)
with open(manifestFilename, 'a') as f: # Write to the manifest file for aeon loader
subjectName = ntpath.splitext(ntpath.basename(img_file))[0]
print('Saving image patches for file {}/{}.'.format(SUBSET, subjectName))
for i in range(len(valuesArray)):
#print('\r{} of {}'.format(i+1, len(valuesArray))),
im = toimage(patchesArray[i])
pngName = saveDir + '/{}_{}_{}.png'.format(subjectName, i, valuesArray[i])
im.save(pngName)
f.write('{},label_{}.txt\n'.format(pngName, valuesArray[i]))
f.close()
print('{}: Finished {}\n'.format(SUBSET, subjectName))
"""
Loop through all .mhd files within the data directory and process them.
"""
# Reset the manifest file to empty
manifestFilename = 'manifest_{}_ALL.csv'.format(SUBSET)
f = open(manifestFilename, 'w')
f.close()
for root, dirs, files in os.walk(DATA_DIR+SUBSET):
for file in files:
if (file.endswith('.mhd')) & ('__MACOSX' not in root): # Don't get the Macintosh directory
img_file = os.path.join(root, file)
patchesArray, valuesArray = extractCandidates(img_file)
SavePatches(manifestFilename, img_file, patchesArray, valuesArray)
| apache-2.0 |
sergiopasra/megaradrp | tools/check_overscan.py | 2 | 3194 |
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import numpy as np
import scipy.ndimage.filters as filt
from scipy.interpolate import LSQUnivariateSpline
conf = {"trim1": [[0,2056],[50,4146]],
"trim2": [[2156,4212],[50,4146]],
"overscan1": [[0,2056],[4146,4196]],
"overscan1_corner": [[2056,2106],[4146,4196]],
"overscan2": [[2156,4212],[0,50]],
"overscan2_corner": [[2106,2156],[4146,4196]],
"prescan1": [[0,2056],[0,50]],
"prescan2": [[2156,4212],[4146,4196]],
"middle1": [[2056,2106],[50,4146]],
"middle2": [[2106,2156],[50,4146]],
"gain1": 1.73,
"gain2": 1.6
}
def to_slice(sec):
sec1, sec2 = sec
return slice(*sec1), slice(*sec2)
def to_str(sec, format='fits'):
sec1, sec2 = sec
return str([to_index(sec2), to_index(sec1)])
def to_index(ssec, format='fits'):
a, b = ssec
return [a + 1, b]
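# Example of the assumed conversion: to_index((0, 2056)) -> [1, 2056], i.e. the half-open,
# 0-based Python range becomes an inclusive, 1-based range, and to_str swaps the two axes so
# the resulting string lists the x-range before the y-range, as FITS section strings do.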
def to_pix(sec, axis=0):
return np.arange(*sec[axis])
def plot2(data, name, knots, ax, s=0, r=-1):
reg = conf[name]
axis_u = 0
axis_v = 1
plot4(data, reg, axis_u, axis_v, knots, ax, s=s, r=r)
def plot3(data, name, knots, ax, s=0, r=-1):
reg = conf[name]
axis_u = 1
axis_v = 0
plot4(data, reg, axis_u, axis_v, knots, ax, s=s, r=r)
def plot4(data, reg, axis_u, axis_v, knots, ax, s=0, r=-1):
u = to_pix(reg, axis=axis_u)
region = to_slice(reg)
v = data[region].mean(axis=axis_v)
v = filt.median_filter(v, size=7)
spl2 = LSQUnivariateSpline(u, v, knots, k=3)
v_spl2 = spl2(u)
ax.plot(u[s:r], v[s:r], label="D")
ax.plot(u[s:r], v_spl2[s:r], label="S2")
if __name__ == '__main__':
import os
import argparse
parser = argparse.ArgumentParser(description='Check overscan')
parser.add_argument('filename', metavar='FILE', nargs='+',
help='Check overscan')
args = parser.parse_args()
for fname in args.filename:
print(fname)
fname_base, ext = os.path.splitext(fname)
hdulist = fits.open(fname)
hdu = hdulist[0]
data = hdu.data
s = 0
r = -1
knots1 = [10, 100, 200, 300, 400, 500, 750, 1000, 1200, 1500, 1700, 2000]
knots2 = [2200, 3000, 3500, 3900, 4000, 4100, 4200]
knots1 = [125, 250, 375, 500, 1000, 1500]
knots2 = [2500, 3000, 3500, 3600, 3700, 3800]
knots1 = [1200]
knots2 = [3100]
knotsm = [2100]
for regname, knots in zip(['overscan1', 'overscan2'], [knots1, knots2]):
fig, axes = plt.subplots(1, 1)
regs = to_str(conf[regname])
axes.set_title("{}\n{} {}".format(fname, regname, regs))
plot2(data, regname, knots, axes, s=s, r=r)
plt.savefig("{}_{}.png".format(fname_base, regname))
plt.close()
for regname, knots in zip(['middle1', 'middle2'], [knotsm, knotsm]):
fig, axes = plt.subplots(1, 1)
regs = to_str(conf[regname])
axes.set_title("{}\n{} {}".format(fname, regname, regs))
plot3(data, regname, knots, axes, s=s, r=r)
plt.savefig("{}_{}.png".format(fname_base, regname))
plt.close()
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/period/test_period_range.py | 2 | 3609 | import pytest
from pandas import NaT, Period, PeriodIndex, date_range, period_range
import pandas.util.testing as tm
class TestPeriodRange:
@pytest.mark.parametrize("freq", ["D", "W", "M", "Q", "A"])
def test_construction_from_string(self, freq):
# non-empty
expected = date_range(
start="2017-01-01", periods=5, freq=freq, name="foo"
).to_period()
start, end = str(expected[0]), str(expected[-1])
result = period_range(start=start, end=end, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=start, periods=5, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=5, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq=freq, name="foo")
result = period_range(start=start, periods=0, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
def test_construction_from_period(self):
# upsampling
start, end = Period("2017Q1", freq="Q"), Period("2018Q1", freq="Q")
expected = date_range(
start="2017-03-31", end="2018-03-31", freq="M", name="foo"
).to_period()
result = period_range(start=start, end=end, freq="M", name="foo")
tm.assert_index_equal(result, expected)
# downsampling
start, end = Period("2017-1", freq="M"), Period("2019-12", freq="M")
expected = date_range(
start="2017-01-31", end="2019-12-31", freq="Q", name="foo"
).to_period()
result = period_range(start=start, end=end, freq="Q", name="foo")
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq="W", name="foo")
result = period_range(start=start, periods=0, freq="W", name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq="W", name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq="W", name="foo")
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = (
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1")
with pytest.raises(ValueError, match=msg):
period_range(end="2017Q1")
with pytest.raises(ValueError, match=msg):
period_range(periods=5)
with pytest.raises(ValueError, match=msg):
period_range()
# too many params
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1", end="2018Q1", periods=8, freq="Q")
# start/end NaT
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start=NaT, end="2018Q1")
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1", end=NaT)
# invalid periods param
msg = "periods must be a number, got foo"
with pytest.raises(TypeError, match=msg):
period_range(start="2017Q1", periods="foo")
| apache-2.0 |
stonneau/hpp-rbprm-corba | script/tools/calibrate-w/computespaces.py | 4 | 3772 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer
import sys
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
#~ fullBody.setJointBounds ("base_joint_xyz", [-1,2, -2, 1, 0.5, 2.5])
#~ AFTER loading obstacles
rLegId = 'hrp2_rleg_rom'
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,-0.105,0,]
rLegNormal = [0,1,0]
rLegx = 0.03; rLegy = 0.03
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 10000, "manipulability", 0.03)
lLegId = 'hrp2_lleg_rom'
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,-0.105,0]
lLegNormal = [0,1,0]
lLegx = 0.03; lLegy = 0.03
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,rLegNormal, lLegx, lLegy, 10000, "manipulability", 0.03)
rarmId = 'hrp2_rarm_rom'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
rArmOffset = [-0.05,-0.050,-0.050]
rArmNormal = [1,0,0]
rArmx = 0.024; rArmy = 0.024
fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 10000, "EFORT", 0.05)
larmId = 'hrp2_larm_rom'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
lArmOffset = [-0.05,-0.050,-0.050]
lArmNormal = [1,0,0]
lArmx = 0.024; lArmy = 0.024
fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, "EFORT", 0.05)
scene = sys.argv[len(sys.argv)-1]
configFile = "creach_2DGrid_"+scene+'.pkl'
import pickle
pkl_file = open(configFile, 'rb')
ok_configs = pickle.load(pkl_file)
pkl_file.close()
ps = ProblemSolver( fullBody )
r = Viewer (ps)
r.loadObstacleModel ('hpp-rbprm-corba', scene, "planning")
limbs = [larmId, rarmId, lLegId, rLegId]
q_init = [
0, 0, 0, 1.0, 0.0 , 0.0, 0.0, # Free flyer 0-6
0.0, 0.0, 0.0, 0.0, # CHEST HEAD 7-10
0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # LARM 11-17
0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # RARM 18-24
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # LLEG 25-30
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # RLEG 31-36
]; r (q_init)
fullBody.setCurrentConfig (q_init)
negative = {}
positive = {}
nbnegative = 0
totalconfigs = 0
res = {}
x_start = -1.5
y_start = 0
x_max = 2.5
y_max = 2.65
iter_step = 0.1
res = {}
import numpy as np
nbStepsX = int((x_max - x_start) / iter_step)
nbStepsY = int((y_max - y_start) / iter_step)
x_start = 0
y_start = 0
x_max = 2
y_max = 1.64
iter_step = 0.01
x_t = []
y_t = []
#~ res = []
for x in ok_configs.keys():
for y in ok_configs[x].keys():
#~
#~ for x in np.linspace(x_start,x_max, num=nbStepsX):
#~ for y in np.linspace(y_start,y_max, num=nbStepsY):
q = q_init
q[0] = x
q[1] = -0.82
q[2] = y
#~ print "test"
if (fullBody.canGenerateBalancedContact(q, [0,0,1])):
#~ print "ok"
x_t.append(x)
y_t.append(y)
#~ res.append(fullBody.generateContacts(q, [0,0,1]))
x_t.append(0)
y_t.append(0) #for scale
x_t.append(2)
y_t.append(1.64) #for scale
import pickle
sFile = "cgeom_feasible_2DGrid_"+scene+'.pkl'
output = open(sFile, 'wb')
pickle.dump(res, output)
output.close()
import numpy as np
import matplotlib.pyplot as plt
#~ plt.scatter(x_t, y_t, s=200, marker='s', edgecolors='none')
#~ plt.scatter(x_t, y_t, s=170, marker='s' ,color ='c', edgecolors='none')
plt.scatter(x_t, y_t, edgecolors='none')
plt.show()
| lgpl-3.0 |
nok/sklearn-porter | tests/estimator/classifier/NuSVC/NuSVCJavaTest.py | 1 | 1085 | # -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from sklearn.svm.classes import NuSVC
from tests.estimator.classifier.Classifier import Classifier
from tests.estimator.classifier.ExportedData import ExportedData
from tests.language.Java import Java
class NuSVCJavaTest(Java, Classifier, ExportedData, TestCase):
def setUp(self):
super(NuSVCJavaTest, self).setUp()
self.estimator = NuSVC(kernel='rbf', gamma=0.001, random_state=0)
def tearDown(self):
super(NuSVCJavaTest, self).tearDown()
@unittest.skip('The generated code would be too large.')
def test_existing_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_existing_features__digits_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__digits_data__default(self):
pass
| mit |
burakbayramli/classnotes | linear/linear_app70svdapprox/ssvd.py | 2 | 1202 | from numpy.linalg import linalg as la
import numpy as np
import random, pandas as pd
def create_training_test(df,collim=2,rowlim=200):
test_data = []
df_train = df.copy()
for u in range(df.shape[0]):
row = df.ix[u]; idxs = row.index[row.notnull()]
if len(idxs) > collim:
i = random.choice(idxs); val = df.ix[u,i]
test_data.append([u,i,val])
df_train.ix[u,i] = np.nan
if len(test_data) > rowlim: break
return df_train, test_data
def ssvd(df_train,k):
lam = 0.02 # regularizasyon
gamma = 0.01 # adim katsayisi
m,n = df_train.shape
b_u = np.random.uniform(0, 0.1, size=m)
b_i = np.random.uniform(0, 0.1, size=n)
p_u = np.random.rand(m,k)
q_i = np.random.rand(k, n)
r_ui = np.array(df_train)
for u in range(m):
row = df_train.ix[u]; idxs = row.index[row.notnull()]
for i in idxs:
i = int(i)
r_ui_hat = np.dot(q_i[:,i].T,p_u[u,:])
e_ui = r_ui[u,i] - r_ui_hat
q_i[:,i] = q_i[:,i] + gamma * (e_ui*p_u[u,:].T - lam*q_i[:,i])
p_u[u,:] = p_u[u,:] + gamma * (e_ui*q_i[:,i].T - lam*p_u[u,:])
return q_i,p_u
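# Minimal usage sketch (variable names below are illustrative, not part of the original script):
# df_train, test_data = create_training_test(ratings_df)
# q_i, p_u = ssvd(df_train, k=10)
# r_hat = np.dot(p_u[u, :], q_i[:, i]) # predicted rating of user u for item i
# The held-out (u, i, val) triples in test_data can then be used to score r_hat.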
| gpl-3.0 |
abought/osf.io | scripts/analytics/addons.py | 18 | 2173 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
        for record in collection.find(query, {_id: True})
]
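# MongoDB ObjectIds embed their creation timestamp, which is presumably what the imported
# oid_to_datetime helper decodes, so these datetimes approximate when each record was created.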
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
shakamunyi/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 6 | 4519 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 + .42193i.
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
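# For example, with n = 1000: from_pixel(0, 0, 1000) == -1-1j, from_pixel(500, 500, 1000) == 0j,
# and from_pixel(1000, 1000, 1000) == 1+1j, so the n x n pixel grid spans the square [-1, 1] x [-1, 1].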
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in xrange(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
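# Note: the loop above stops either when z.real**2 + z.imag**2 exceeds 2.0 (the point is treated
# as escaping) or after max_iterations steps, and the returned i is the loop index at which that
# happened; it is later used to pick a colour for the pixel.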
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(
get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
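# beam.Create materialises the generator of (x, y) pixel positions as a PCollection, and
# beam.Map fans get_julia_set_point_color out over it, so each pixel's escape-time value
# is computed independently by whichever Beam runner executes the pipeline.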
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) / max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument('--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
with beam.Pipeline(argv=pipeline_args) as p:
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
def x_coord_key(x_y_i):
(x, y, i) = x_y_i
return (x, (x, y, i))
# Group each coordinate triplet by its x value, then write the coordinates
# to the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(coordinates
| 'x coord key' >> beam.Map(x_coord_key)
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda k_coords: ' '.join('(%s, %s, %s)' % c for c in k_coords[1]))
| WriteToText(known_args.coordinate_output))
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
jdavidrcamacho/Tests_GP | 06 - Results/tests_lineardecay.py | 1 | 16712 | # -*- coding: utf-8 -*-
import Gedi as gedi
import numpy as np; #np.random.seed(13042017)
import matplotlib.pylab as pl; pl.close("all")
import astropy.table as Table
import sys
##### Spots data preparation ##################################################
print
print "****************************************************"
print "Preparing data"
print "****************************************************"
print
spots_data= [] #to contain all data in the end
#data from .rdb file
rdb_data= Table.Table.read('output_spots21.rdb',format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
t= np.array(range(1,101))
pl.figure('data')
pl.plot(t,y,'-')
#pl.close('data')
print "Done."
##### Optimization ############################################################
print
print "****************************************************"
print "Running optimization algorithms - round 1"
print "****************************************************"
print
f=open("Tests_gradient_optimization1.txt","w")
sys.stdout = f
kernel1= gedi.kernel.ExpSineSquared(15.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel2= gedi.kernel.QuasiPeriodic(15.0, 1.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel3= gedi.kernel.ExpSquared(15.0,1.0)+ \
gedi.kernel.WhiteNoise(1.0)
likelihood1= gedi.kernel_likelihood.likelihood(kernel1,t,y,yerr)
likelihood2= gedi.kernel_likelihood.likelihood(kernel2,t,y,yerr)
likelihood3= gedi.kernel_likelihood.likelihood(kernel3,t,y,yerr)
print "Initial kernel # log-likelihood"
print kernel1, '#', likelihood1
print kernel2, '#', likelihood2
print kernel3, '#', likelihood3
print
optimization1= gedi.kernel_optimization.committed_optimization(kernel1, \
t,y,yerr)
#optimization1= gedi.kernel_optimization.single_optimization(kernel1, \
# t,y,yerr,method="altsda")
optimization2= gedi.kernel_optimization.committed_optimization(kernel2, \
t,y,yerr)
#optimization2= gedi.kernel_optimization.single_optimization(kernel2, \
# t,y,yerr,method="altsda")
optimization3= gedi.kernel_optimization.committed_optimization(kernel3, \
t,y,yerr)
#optimization= gedi.kernel_optimization.single_optimization(kernel3, \
# t,y,yerr,method="altsda")
print "Final kernel # log-likelihood"
print optimization1[1], '#', optimization1[0]
print optimization2[1], '#', optimization2[0]
print optimization3[1], '#', optimization3[0]
print
sys.stdout = sys.__stdout__
f.close()
print "Done."
##### Final Graphics #########################################################
print
print "****************************************************"
print "Preparing graphics"
print "****************************************************"
print
xcalc= np.linspace(0,101,500)
[mu1,std1]= gedi.kernel_likelihood.compute_kernel(optimization1[1], \
t,xcalc,y,yerr)
[mu2,std2]= gedi.kernel_likelihood.compute_kernel(optimization2[1], \
t,xcalc,y,yerr)
[mu3,std3]= gedi.kernel_likelihood.compute_kernel(optimization3[1], \
t,xcalc,y,yerr)
pl.figure('fit1') #Graphics
pl.fill_between(xcalc, mu1+std1, mu1-std1, color="k", alpha=0.1)
pl.plot(xcalc, mu1+std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1-std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_ess_spots1.png")
pl.close('fit1')
pl.figure('fit2') #Graphics
pl.fill_between(xcalc, mu2+std2, mu2-std2, color="k", alpha=0.1)
pl.plot(xcalc, mu2+std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2-std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_qp_spots1.png")
pl.close('fit2')
pl.figure('fit3') #Graphics
pl.fill_between(xcalc, mu3+std3, mu3-std3, color="k", alpha=0.1)
pl.plot(xcalc, mu3+std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3-std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_es_spots1.png")
pl.close('fit3')
print "Done."
###############################################################################
##### Optimization ############################################################
print
print "****************************************************"
print "Running optimization algorithms - round 2"
print "****************************************************"
print
f=open("Tests_gradient_optimization2.txt","w")
sys.stdout = f
kernel1= gedi.kernel.ExpSineSquared(15.0, 1.0, 20.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel2= gedi.kernel.QuasiPeriodic(15.0, 1.0, 1.0, 20.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel3= gedi.kernel.ExpSquared(15.0,1.0)+ \
gedi.kernel.WhiteNoise(1.0)
likelihood1= gedi.kernel_likelihood.likelihood(kernel1,t,y,yerr)
likelihood2= gedi.kernel_likelihood.likelihood(kernel2,t,y,yerr)
likelihood3= gedi.kernel_likelihood.likelihood(kernel3,t,y,yerr)
print "Initial kernel # log-likelihood"
print kernel1, '#', likelihood1
print kernel2, '#', likelihood2
print kernel3, '#', likelihood3
print
optimization1= gedi.kernel_optimization.committed_optimization(kernel1, \
t,y,yerr)
#optimization1= gedi.kernel_optimization.single_optimization(kernel1, \
# t,y,yerr,method="altsda")
optimization2= gedi.kernel_optimization.committed_optimization(kernel2, \
t,y,yerr)
#optimization2= gedi.kernel_optimization.single_optimization(kernel2, \
# t,y,yerr,method="altsda")
optimization3= gedi.kernel_optimization.committed_optimization(kernel3, \
t,y,yerr)
#optimization= gedi.kernel_optimization.single_optimization(kernel3, \
# t,y,yerr,method="altsda")
print "Final kernel # log-likelihood"
print optimization1[1], '#', optimization1[0]
print optimization2[1], '#', optimization2[0]
print optimization3[1], '#', optimization3[0]
print
sys.stdout = sys.__stdout__
f.close()
print "Done."
##### Final Graphics #########################################################
print
print "****************************************************"
print "Preparing graphics"
print "****************************************************"
print
xcalc= np.linspace(0,101,500)
[mu1,std1]= gedi.kernel_likelihood.compute_kernel(optimization1[1], \
t,xcalc,y,yerr)
[mu2,std2]= gedi.kernel_likelihood.compute_kernel(optimization2[1], \
t,xcalc,y,yerr)
[mu3,std3]= gedi.kernel_likelihood.compute_kernel(optimization3[1], \
t,xcalc,y,yerr)
pl.figure('fit1') #Graphics
pl.fill_between(xcalc, mu1+std1, mu1-std1, color="k", alpha=0.1)
pl.plot(xcalc, mu1+std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1-std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_ess_spots2.png")
pl.close('fit1')
pl.figure('fit2') #Graphics
pl.fill_between(xcalc, mu2+std2, mu2-std2, color="k", alpha=0.1)
pl.plot(xcalc, mu2+std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2-std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_qp_spots2.png")
pl.close('fit2')
pl.figure('fit3') #Graphics
pl.fill_between(xcalc, mu3+std3, mu3-std3, color="k", alpha=0.1)
pl.plot(xcalc, mu3+std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3-std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_es_spots2.png")
pl.close('fit3')
print "Done."
###############################################################################
##### Optimization ############################################################
print
print "****************************************************"
print "Running optimization algorithms - round 3"
print "****************************************************"
print
f=open("Tests_gradient_optimization3.txt","w")
sys.stdout = f
kernel1= gedi.kernel.ExpSineSquared(15.0, 1.0, 30.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel2= gedi.kernel.QuasiPeriodic(15.0, 1.0, 1.0, 30.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel3= gedi.kernel.ExpSquared(15.0,1.0)+ \
gedi.kernel.WhiteNoise(1.0)
likelihood1= gedi.kernel_likelihood.likelihood(kernel1,t,y,yerr)
likelihood2= gedi.kernel_likelihood.likelihood(kernel2,t,y,yerr)
likelihood3= gedi.kernel_likelihood.likelihood(kernel3,t,y,yerr)
print "Initial kernel # log-likelihood"
print kernel1, '#', likelihood1
print kernel2, '#', likelihood2
print kernel3, '#', likelihood3
print
optimization1= gedi.kernel_optimization.committed_optimization(kernel1, \
t,y,yerr)
#optimization1= gedi.kernel_optimization.single_optimization(kernel1, \
# t,y,yerr,method="altsda")
optimization2= gedi.kernel_optimization.committed_optimization(kernel2, \
t,y,yerr)
#optimization2= gedi.kernel_optimization.single_optimization(kernel2, \
# t,y,yerr,method="altsda")
optimization3= gedi.kernel_optimization.committed_optimization(kernel3, \
t,y,yerr)
#optimization= gedi.kernel_optimization.single_optimization(kernel3, \
# t,y,yerr,method="altsda")
print "Final kernel # log-likelihood"
print optimization1[1], '#', optimization1[0]
print optimization2[1], '#', optimization2[0]
print optimization3[1], '#', optimization3[0]
print
sys.stdout = sys.__stdout__
f.close()
print "Done."
##### Final Graphics #########################################################
print
print "****************************************************"
print "Preparing graphics"
print "****************************************************"
print
xcalc= np.linspace(0,101,500)
[mu1,std1]= gedi.kernel_likelihood.compute_kernel(optimization1[1], \
t,xcalc,y,yerr)
[mu2,std2]= gedi.kernel_likelihood.compute_kernel(optimization2[1], \
t,xcalc,y,yerr)
[mu3,std3]= gedi.kernel_likelihood.compute_kernel(optimization3[1], \
t,xcalc,y,yerr)
pl.figure('fit1') #Graphics
pl.fill_between(xcalc, mu1+std1, mu1-std1, color="k", alpha=0.1)
pl.plot(xcalc, mu1+std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1-std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_ess_spots3.png")
pl.close('fit1')
pl.figure('fit2') #Graphics
pl.fill_between(xcalc, mu2+std2, mu2-std2, color="k", alpha=0.1)
pl.plot(xcalc, mu2+std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2-std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_qp_spots3.png")
pl.close('fit2')
pl.figure('fit3') #Graphics
pl.fill_between(xcalc, mu3+std3, mu3-std3, color="k", alpha=0.1)
pl.plot(xcalc, mu3+std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3-std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_es_spots3.png")
pl.close('fit3')
print "Done."
###############################################################################
##### Optimization ############################################################
print
print "****************************************************"
print "Running optimization algorithms - round 4"
print "****************************************************"
print
f=open("Tests_gradient_optimization4.txt","w")
sys.stdout = f
kernel1= gedi.kernel.ExpSineSquared(25.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel2= gedi.kernel.QuasiPeriodic(25.0, 1.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel3= gedi.kernel.ExpSquared(25.0,1.0)+ \
gedi.kernel.WhiteNoise(1.0)
likelihood1= gedi.kernel_likelihood.likelihood(kernel1,t,y,yerr)
likelihood2= gedi.kernel_likelihood.likelihood(kernel2,t,y,yerr)
likelihood3= gedi.kernel_likelihood.likelihood(kernel3,t,y,yerr)
print "Initial kernel # log-likelihood"
print kernel1, '#', likelihood1
print kernel2, '#', likelihood2
print kernel3, '#', likelihood3
print
optimization1= gedi.kernel_optimization.committed_optimization(kernel1, \
t,y,yerr)
#optimization1= gedi.kernel_optimization.single_optimization(kernel1, \
# t,y,yerr,method="altsda")
optimization2= gedi.kernel_optimization.committed_optimization(kernel2, \
t,y,yerr)
#optimization2= gedi.kernel_optimization.single_optimization(kernel2, \
# t,y,yerr,method="altsda")
optimization3= gedi.kernel_optimization.committed_optimization(kernel3, \
t,y,yerr)
#optimization= gedi.kernel_optimization.single_optimization(kernel3, \
# t,y,yerr,method="altsda")
print "Final kernel # log-likelihood"
print optimization1[1], '#', optimization1[0]
print optimization2[1], '#', optimization2[0]
print optimization3[1], '#', optimization3[0]
print
sys.stdout = sys.__stdout__
f.close()
print "Done."
##### Final Graphics #########################################################
print
print "****************************************************"
print "Preparing graphics"
print "****************************************************"
print
xcalc= np.linspace(0,101,500)
[mu1,std1]= gedi.kernel_likelihood.compute_kernel(optimization1[1], \
t,xcalc,y,yerr)
[mu2,std2]= gedi.kernel_likelihood.compute_kernel(optimization2[1], \
t,xcalc,y,yerr)
[mu3,std3]= gedi.kernel_likelihood.compute_kernel(optimization3[1], \
t,xcalc,y,yerr)
pl.figure('fit1') #Graphics
pl.fill_between(xcalc, mu1+std1, mu1-std1, color="k", alpha=0.1)
pl.plot(xcalc, mu1+std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1-std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_ess_spots4.png")
pl.close('fit1')
pl.figure('fit2') #Graphics
pl.fill_between(xcalc, mu2+std2, mu2-std2, color="k", alpha=0.1)
pl.plot(xcalc, mu2+std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2-std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_qp_spots4.png")
pl.close('fit2')
pl.figure('fit3') #Graphics
pl.fill_between(xcalc, mu3+std3, mu3-std3, color="k", alpha=0.1)
pl.plot(xcalc, mu3+std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3-std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3, color="k", alpha=1, lw=0.5)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_es_spots4.png")
pl.close('fit3')
print "Done."
###############################################################################
| mit |
jolynch/python-hqsom | hqsom/som_test.py | 1 | 23086 | from som import *
from rsom import *
from hqsom import *
from hqsom_audio import *
from preproc.images import *
import preproc.audio as audio
import getopt, sys
import traceback
#import matplotlib.pyplot as plt
import pickle
import genetic_algo
tests = ("som","rsom", "hqsom", "hqsom_noise", "hqsom_noise_multiple", "image_gen", "hqsom_77_network", "hqsom_77", "audio")
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
use_pure = False
input_vectors = np.array([
[0.1 , 0.1 , 0.1 , 0.1],
[.01 ,.001 , 0.6 , 0.8 ],
[0.3 , 0.3 , 0.3 , 0.3],
[0.0 , 0.8 , 0.0 , 0.0],
[1.0 , 0.9 , 0.95, 0.82],
[0.35,0.95 , 0.24, 0.76]])
rate, spread, size, input_size = .4, .2, len(input_vectors), len(input_vectors[0])
def test_som():
som1 = SOM(input_size, size, pure=use_pure)
assert som1
#Test that a single vector can be trained on
print "-- Training on single input --"
for i in range(10):
som1.update(input_vectors[0], rate, spread)
print "Got MSE of {}".format(som1.mse(input_vectors[0]))
assert som1.mse(input_vectors[0]) < 1e-3
#Test that all vectors can be trained on in a 1:1 network input_size = size
som1 = SOM(input_size, size, pure=use_pure)
print "-- Training on all inputs --"
for i in range(1000):
som1.update(input_vectors[i%len(input_vectors)], rate, spread)
total_mse = 0
for inp in input_vectors:
total_mse += som1.mse(inp)
print "Got MSE of {}".format(som1.mse(inp))
assert som1.mse(inp) < .3
assert total_mse < .05 * len(input_vectors)
#Checking Activation vectors
activated = set()
for inp in input_vectors:
activated.add(som1.bmu(inp))
print "Applying signal: {}".format(inp)
print "Activating {}".format(som1.units[som1.bmu(inp)])
err = abs(len(activated) - len(input_vectors))
print activated
print "All activated units: {}".format(activated)
print "Error: {} vs max {}".format(err, .5*len(input_vectors))
assert err <= .5*len(input_vectors)
#For paper, disregard
#data = np.transpose(np.array([
#[.3 , .7 , .1 , .14 , .01],
#[.3 , .1 , .01 , .16 , .9],
#[.3 , .03 , .8 , .7 , .01]]))
#som1 = SOM(3,5,True)
#som1.units = data
#som1.update(np.array((.1,.1,.1)), .2, 1)
#print som1.units.transpose()
#I'm kind of unsure how to really test this ...
def test_rsom():
rsom1 = RSOM(input_size, size, pure=use_pure)
alpha = .3
#Test a time dependent sequence
print "-- Training on alternating values --"
for i in range(1000):
rsom1.update(input_vectors[i%2], rate, spread, alpha)
rsom1.update(input_vectors[2], rate, spread, alpha)
rsom1.reset()
assert rsom1.differences[0][0] == 0
for i in range(3):
print "Got MSE of {}".format(rsom1.mse(input_vectors[i]))
print "Activation vector: {}".format(rsom1.activation_vector(input_vectors[i], True))
assert rsom1.mse(input_vectors[i%2]) < .3
def test_hqsom():
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
g1,g2,s1,s2,a = .1,.1,16,90,.1
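    # g1/g2 and s1/s2 are presumably the SOM/RSOM learning rates and neighbourhood
    # spreads, and a the temporal-decay coefficient, as passed to HQSOM.update below.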
hqsom = HQSOM(9,18,3, use_pure_implementation=use_pure)
def flush(num):
for l in range(num):
hqsom.update(test_data[0], g1,g2,s1,s2,a)
num_cycles, num_repeats = 25, 11
total_run_count, seq_count = num_cycles*num_repeats*9, 0
for j in range(num_cycles):
for i in range(num_repeats):
print "update {}/{}".format(seq_count,total_run_count)
hqsom.reset()
seq = ()
if i %2 == 0:
seq = (1,2,3,1,2,3)
else:
seq = (4,5,6,4,5,6)
for k in seq:
hqsom.update(test_data[k], g1, g2, s1, s2, a)
hqsom.reset()
seq_count += 9
c = [hqsom.activation_vector(t) for t in test_data]
print c
assert c[0] != c[1] and c[1] != c[4]
assert c[1] == c[2] and c[2] == c[3]
assert c[4] == c[5] and c[5] == c[6]
assert c[3] != c[4]
def test_hqsom_noise(noise_std=.1):
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
    #Add in Gaussian noise
noise = np.random.normal(0.0,noise_std,test_data.shape)
test_data = test_data + noise
g1,g2,s1,s2,a = .1,.1,16,90,.1
#Due to the noise we have to add many more map units
hqsom = HQSOM(9,18,3, use_pure_implementation=use_pure)
    print "Training HQSOM on noisy data"
def flush(num):
for l in range(num):
hqsom.update(test_data[0], g1,g2,s1,s2,a)
num_cycles, num_repeats = 25, 11
total_run_count, seq_count = num_cycles*num_repeats*9, 0
for j in range(num_cycles):
for i in range(num_repeats):
print "update {}/{}".format(seq_count,total_run_count)
hqsom.reset()
if i %2 == 0:
seq = (1,2,3,1,2,3)
else:
seq = (4,5,6,4,5,6)
for k in seq:
hqsom.update(test_data[k], g1, g2, s1, s2, a)
hqsom.reset()
seq_count += 9
#Re-do the test data to test on different noisy data
print "Generating different test data for activating"
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
    #Add in Gaussian noise
noise = np.random.normal(0.0,noise_std,test_data.shape)
test_data = test_data + noise
g1,g2,s1,s2,a = .1,.1,16,90,.1
c = [hqsom.activation_vector(t) for t in test_data]
print c
assert c[0] != c[1] and c[1] != c[4]
assert c[1] == c[2] and c[2] == c[3]
assert c[4] == c[5] and c[5] == c[6]
assert c[3] != c[4]
def test_hqsom_noise_multiple():
num_errors, num_tests, noise_std = 0, 100, .2
for i in range(num_tests):
try:
test_hqsom_noise(noise_std)
except:
num_errors += 1
print "Passed {} out of {}".format(num_tests-num_errors, num_tests)
assert num_errors < .25 * num_tests
def enumerate_spiral(l):
coords, coord, original_l = [], [0,0], l
while l > 0:
#Go down
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[1]+=1
#print "going down from {} to {}".format(coords[-1], coord)
if l < original_l:
l -= 1
#Go right
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[0]+=1
#print "going right from {} to {}".format(coords[-1], coord)
#Go up
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[1]-=1
#print "going up from {} to {}".format(coords[-1], coord)
l -= 1
#Go left
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[0]-=1
#print "going left from {} to {}".format(coords[-1], coord)
coords.append(coord)
return coords
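# For reference, enumerate_spiral(2) visits the 3x3 block of offsets in the order
# (0,0),(0,1),(0,2),(1,2),(2,2),(2,1),(2,0),(1,0),(1,1), i.e. down, right, up and
# left in a shrinking spiral, and finally appends the centre coordinate again as a list.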
def test_hqsom_77_network():
output_size =17
hqsom = PaperFig3Hierarchy(65,17,513,output_size, use_pure_implementation=use_pure)
g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .1,.01,.1,.001, 16.0, 100.0, 4.0, 200.0, .1, .01
data_image = Square_Image(5,(1,1))
data = data_image.data()
hqsom.update(data,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
print hqsom.activation_vector(data,False,True)
assert hqsom.activation_vector(data) != None
def test_hqsom_77():
    #Generate the test sequence; note that we must do a spiral exposure to get the
    #correct spatio-temporal representations in the SOMs
#7x7 only has one possible test (cycled twice of course)
coord_test = {"large":[(7,0,0),(7,0,0)]}
#5x5 has 9 possible positions (cycled twice of course)
coord_test["medium"] = [(5,i,j) for (i,j) in enumerate_spiral(2)]
coord_test["medium"] = coord_test["medium"][::-1] + coord_test["medium"]
#3x3 has 25 possible positions (cycled twice of course)
coord_test["small"] = [(3,i,j) for (i,j) in enumerate_spiral(4)]
coord_test["small"] = coord_test["small"][::-1] + coord_test["small"]
#######The available data sets
square_data, diamond_data, x_data = [], [], []
#First we spiral out, then back in for each data set
for data_type,data_class,data_container in [("square", Square_Image, square_data),
("diamond", Diamond_Image, diamond_data),
("x", X_Image, x_data)]:
for data_set in ("large","medium", "small"):
for (w,x,y) in coord_test[data_set]:
if data_type == "diamond":
w,x,y = w/2, x+w/2, y+w/2
image_data = data_class(w,(x,y))
data_container.append(image_data.data())
image_data.save("data/{}_#{}#_".format(data_type, str(len(data_container)).zfill(2)))
blank_data = [Data_Image().data() for i in range(20)]
#print len(square_data)
#print len(diamond_data)
#print len(x_data)
#Paper settings
#Make sure we don't use any of our "improvements"
#bottom_som_size, bottom_rsom_size, top_som_size, output_size = 65,17,513,17
#hqsom = PaperFig3Hierarchy(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = use_pure)
#g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .1,.01,.1,.001, 16.0, 100.0, 4.0, 250.0, .1, .01
#run_name = "PAPER_RUN_GAUSSIAN_"
#num_cycles, data_sets, num_repeats = 150, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 5
#Really good TWO classifier:
#bottom_som_size, bottom_rsom_size, top_som_size, output_size = 10,80,10,5
#hqsom = PaperFig3Hierarchy(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = True)
#g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .2,.4,.1,.5, 10.0, 80.0, 14.0, 100.0, .8, .01
#run_name = "TWO_CLASS_"
#num_cycles, data_sets, num_repeats = 1, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 1
#Our settings
bottom_som_size, bottom_rsom_size, top_som_size, output_size = 40, 25, 150, 7
hqsom = PaperFig3Hierarchy(bottom_som_size,
bottom_rsom_size,
top_som_size,output_size,
use_pure_implementation=use_pure)
g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = 0.1,0.01,0.1,0.05,20.0,150.0,15.0,250.0,0.1,0.02
run_name = "REFERENCE_19_OUR_SETTINGS_"
num_cycles, data_sets, num_repeats = 50, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 4
seq_num = 0
MAP_Image(hqsom.top_hqsom.rsom.units, "output/{}INITIAL_TOP_RSOM_".format(run_name)).save()
total_run_count = num_cycles * len(data_sets)*(len(data_sets[0][1])*num_repeats)
for i in range(num_cycles):
for data_type, data_set in data_sets:
for j in range(num_repeats):
MAP_Image(hqsom.top_hqsom.rsom.units,"output/{}TOP_RSOM_{}_{}_{}".format(run_name,i,data_type,j)).save()
for d in data_set:
hqsom.update(d,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
print "{} update {}/{}".format(data_type, seq_num, total_run_count)
print "{} current BMU: {}".format(data_type, hqsom.activation_vector(d))
seq_num += 1
data_type = "BLANK"
#Instead of training on blank data
print "Resetting SOMS"
hqsom.reset()
MAP_Image(hqsom.top_hqsom.rsom.units,"output/{}TOP_RSOM_{}_{}".format(run_name,i,data_type)).save()
#for d in blank_data:
#hqsom.update(d,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
#print "{} update {}/{}".format(data_type, seq_num, total_run_count)
#print "{} current BMU: {}".format(data_type, hqsom.activation_vector(d))
#seq_num += 1
    print "Collecting Classification Data, please wait; this can take time"
data_sets = [("BLANK", blank_data)]+data_sets
output_hash = {"BLANK":[0]*output_size,"SQUARE":[0]*output_size,"DIAMOND":[0]*output_size,"X":[0]*output_size}
for data_name, data_collection in data_sets:
for i in data_collection:
result = hqsom.activation_vector(i)
output_hash[data_name][result] += 1
print "Run: {}".format(run_name)
print "Using the parameters g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = {},{},{},{},{},{},{},{},{},{}".format(g1,g2,g3,g4,s1,s2,s3,s4,a1,a2)
print "Using {} cycles of each data set repeated {} times".format(num_cycles, num_repeats)
print "BSOM, BRSOM, TSOM, TRSOM sizes: {}, {}, {}, {}".format(bottom_som_size, bottom_rsom_size, top_som_size, output_size)
for data_name, data_collection in data_sets:
mode = np.argmax(output_hash[data_name])
num_items = float(len(data_collection))
print "#"*80
print "Data Set: {}".format(data_name)
print "Most Frequently Classified As (MODE): {}".format(mode)
results = np.array(output_hash[data_name])
print "Full Distribution over Final RSOM Map Space:"
print results / num_items
MAP_Image(hqsom.bottom_hqsom_list[5].rsom.units,"output/{}FINAL_MIDDLE_RSOM".format(run_name)).save()
#WE ONLY SUPPORT wave files of the <b>same bitrate</b>
def test_audio(hqsom=None):
print "Loading songs into memory"
song_rock = audio.Spectrogram("data/music/Californication.wav")
song_rock2 = audio.Spectrogram("data/music/ByWay.wav")
song_techno = audio.Spectrogram("data/music/Everybody.wav")
song_techno2 = audio.Spectrogram("data/music/DayNNight.wav")
song_classical = audio.Spectrogram("data/music/Bells.wav")
song_classical2 = audio.Spectrogram("data/music/Symp9.wav")
print "Done loading songs into memory"
songs = [
("Techno", song_techno),
("TechnoTEST", song_techno2),
("Classical", song_classical),
("ClassicalTEST", song_classical2),
("Rock", song_rock),
("RockTEST", song_rock2),
]
song_types = [i for (i,j) in songs]
num_seconds, test_length = .1, 10
#Get num_second second slices of each song, looking to a cache first
try:
(n,saved_songs,final_data) = pickle.load(open("cache.p", "rb"))
if not n == num_seconds or not saved_songs == tuple(song_types):
raise Exception
print "Found data in cache, skipping generation"
except:
        print "Generating ffts"
raw_data = dict([(i,None) for i in song_types])
for (song_type, song_file) in songs:
            print "Generating data on the fly for {} song".format(song_type)
fft_length = song_file.sample_rate * num_seconds
#To get a power of 2
fft_length = int(2**np.ceil(np.log(fft_length)/np.log(2)));
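            # e.g. a 44.1 kHz file with num_seconds = .1 gives 4410 samples,
            # which the line above rounds up to the next power of two, 8192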
print "Using fft_length of {}".format(fft_length)
raw_data[song_type] = song_file.get_spectrogram(fft_length)
print "Reshaping ffts into length 128 inputs"
final_data = {}
for song_type in song_types:
data = raw_data[song_type]
new_data = np.zeros((data.shape[0], 128))
bucket_sum, spect = 0, None
for spect_index in range(len(data)):
print "{} of {} Spectrograms processed".format(spect_index, len(data))
spect = data[spect_index]
window_size = len(spect) / 128
bucket_sum = 0
for i in range(128):
#bucket_sum = np.mean(spect[i*window_size:i*window_size+window_size])
new_data[spect_index][i] = spect[i*window_size]
#new_data[spect_index] = new_data[spect_index] - min(new_data[spect_index])
#new_data[spect_index] = new_data[spect_index] / np.linalg.norm(new_data[spect_index])
final_data[song_type] = new_data
pickle.dump((num_seconds, tuple(song_types), final_data), open("cache.p","wb"))
"""
plt.matshow(np.transpose(final_data["Rock"]))
plt.title("Rock")
plt.matshow(np.transpose(final_data["Techno"]))
plt.title("Techno")
plt.matshow(np.transpose(final_data["Classical"]))
plt.title("Classical")
plt.matshow(np.transpose(final_data["ClassicalTEST"]))
plt.title("Classical_TEST_DATA")
plt.matshow(np.transpose(final_data["TechnoTEST"]))
plt.title("Techno_TEST_DATA")
plt.matshow(np.transpose(final_data["RockTEST"]))
plt.title("Rock_TEST_DATA")
"""
if hqsom is None:
output_size = 5
hqsom = Hierarchy1D(
LayerConf1D(2, 64, 128, 0,
50, 0.2, 200,
40, .7, 0.15, 100, use_pure),
LayerConf1D(2, 1, 2, 0,
50, 0.2, 200,
20, .7, 0.15, 100, use_pure),
LayerConf1D(1, 2, 2, 0,
32, 0.2, 200,
output_size, .05, 0.2, 100, use_pure),
)
else:
output_size = hqsom.output_size
hqsom = hqsom.to_hierarchy()
#hqsom = NaiveAudioClassifier(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = True)
#hqsom = genetic_algo.Genome(128, output_size).to_hierarchy()
#genome = genetic_algo.Genome(128, 5, [genetic_algo.Gene(128, 1, [128, 1, 0.5349470927446156, 58, 0.16262059789324113, 93, 69, 0.38946495945845583, 0.18591242958088183, 449]),
# genetic_algo.Gene(1, 1, [1, 1, 0.9697823529658623, 67, 0.06338912516811035, 484, 5, 0.07069243885373111, 0.30821633466399, 312])])
#genome = genetic_algo.Genome(128, 5, [
# genetic_algo.Gene(128, 1, [128, 1, 0.8191182230079156, 86, 0.13323972043189236, 175, 31, 0.3806979377580392, 0.8121811036319838, 98]),
# genetic_algo.Gene(1, 1, [1, 1, 0.8727135450401478, 62, 0.3453597203536144, 121, 50, 0.755878448191539, 0.6818380459687157, 325]),
# genetic_algo.Gene(1, 1, [1, 1, 0.4174074007331876, 89, 0.7549203282530946, 50, 5, 0.7849685525193116, 0.5789786448249847, 263])
# ])
#hqsom = genome.to_hierarchy()
print hqsom.layer_configs
run_name = "AUDIO_TEST"
#Testing schema:
# 1) Expose to entirety of three songs
# 2) Pick 3 random sequences of test_length in size from each song, run through
# 3) Clear at each in between
seq_num = 0
num_cycles, num_repeats = 1, 1
total_run_count = num_cycles*sum([(len(final_data[x])) for x in song_types])
for i in range(num_cycles):
for song_type in song_types:
if song_type == "ClassicalTEST" or song_type == "TechnoTEST" or song_type == "RockTEST":
print "Skipping test data: {}".format(song_type)
continue
for spectrum in final_data[song_type]:
hqsom.update(spectrum)
#print hqsom.activation_vector(spectrum, True, True)
print "{} update {}/{}".format(song_type, seq_num, total_run_count)
seq_num += 1
print "Resetting RSOMs"
hqsom.reset()
total_run_count = num_cycles*2*len(song_types)*test_length
seq_num = 0
for i in range(num_cycles*2):
for song_type in song_types:
if song_type == "ClassicalTEST" or song_type == "TechnoTEST" or song_type == "RockTEST":
print "Skipping test data: {}".format(song_type)
continue
num_spectrograms = len(final_data[song_type])
r_index = np.random.randint(0,num_spectrograms-test_length)
for index in range(r_index, r_index+test_length):
hqsom.update(final_data[song_type][index])
#print hqsom.activation_vector(spectrum, False, True)
print "{} update {}/{}".format(song_type, seq_num, total_run_count)
seq_num += 1
print "Resetting RSOMs"
hqsom.reset()
print "Run: {}".format(run_name)
print "Using Network:"
print hqsom.layer_configs
print "num_cycles, num_repeats, num_seconds, test_length = {}, {}, {}, {}".format(num_cycles, num_repeats, num_seconds, test_length)
for data_name in song_types:
print "#"*80
print "Results for {}".format(data_name)
data_collection = final_data[data_name]
results =[0]*output_size
for spect in data_collection:
results[hqsom.activation_vector(spect)] += 1
t = sum(results)
results = [float(i)/t for i in results]
results = np.array(results)
print "Final Distribution Over Map Space"
print results
print "MODE: {}".format(np.argmax(results))
#plt.show()
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "t:l", ["list","test="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
    #So that we get reproducible results
np.random.seed(15717)
for o,a in opts:
if o in ("-t", "--test"):
print "Running {} test:".format(a)
try:
eval("test_"+a)()
except Exception as e:
print e
traceback.print_exc(file=sys.stdout)
print "!!! ERROR !!!"
else:
print "SUCCESS"
elif o in ("-l", "--list"):
print "List of tests: {}".format(tests)
# print help information and exit:
if len(opts) == 0:
print "Running all Tests"
for test in tests:
print "#"*80
print "Running test on: {}".format(test)
print "-"*80
try:
eval("test_"+test)()
except Exception as e :
print e
traceback.print_exc(file=sys.stdout)
print "!!! ERROR !!!"
else:
print "SUCCESS"
print "#"*80
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/testing/jpl_units/Duration.py | 12 | 6736 | #===========================================================================
#
# Duration
#
#===========================================================================
"""Duration module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class Duration(object):
"""Class Duration in development.
"""
allowed = [ "ET", "UTC" ]
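   # Minimal usage sketch (assuming both operands share a frame):
   #   Duration("ET", 100) + Duration("ET", 20)   # -> Duration( 'ET', 120 )
   #   Duration("ET", 100) * 2                    # -> Duration( 'ET', 200 )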
#-----------------------------------------------------------------------
def __init__( self, frame, seconds ):
"""Create a new Duration object.
= ERROR CONDITIONS
- If the input frame is not in the allowed list, an error is thrown.
= INPUT VARIABLES
- frame The frame of the duration. Must be 'ET' or 'UTC'
- seconds The number of seconds in the Duration.
"""
if frame not in self.allowed:
msg = "Input frame '%s' is not one of the supported frames of %s" \
% ( frame, str( self.allowed ) )
raise ValueError( msg )
self._frame = frame
self._seconds = seconds
#-----------------------------------------------------------------------
def frame( self ):
"""Return the frame the duration is in."""
return self._frame
#-----------------------------------------------------------------------
def __abs__( self ):
"""Return the absolute value of the duration."""
return Duration( self._frame, abs( self._seconds ) )
#-----------------------------------------------------------------------
def __neg__( self ):
"""Return the negative value of this Duration."""
return Duration( self._frame, -self._seconds )
#-----------------------------------------------------------------------
def seconds( self ):
"""Return the number of seconds in the Duration."""
return self._seconds
#-----------------------------------------------------------------------
   def __nonzero__( self ):
      """Return True if the Duration is non-zero.
      = RETURN VALUE
      - Returns True if the number of seconds in the Duration is non-zero.
      """
return self._seconds != 0
if six.PY3:
__bool__ = __nonzero__
#-----------------------------------------------------------------------
def __cmp__( self, rhs ):
"""Compare two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to compare against.
= RETURN VALUE
- Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
"""
self.checkSameFrame( rhs, "compare" )
return cmp( self._seconds, rhs._seconds )
#-----------------------------------------------------------------------
def __add__( self, rhs ):
"""Add two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to add.
= RETURN VALUE
- Returns the sum of ourselves and the input Duration.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
if isinstance( rhs, U.Epoch ):
return rhs + self
self.checkSameFrame( rhs, "add" )
return Duration( self._frame, self._seconds + rhs._seconds )
#-----------------------------------------------------------------------
def __sub__( self, rhs ):
"""Subtract two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to subtract.
= RETURN VALUE
- Returns the difference of ourselves and the input Duration.
"""
self.checkSameFrame( rhs, "sub" )
return Duration( self._frame, self._seconds - rhs._seconds )
#-----------------------------------------------------------------------
def __mul__( self, rhs ):
"""Scale a UnitDbl by a value.
= INPUT VARIABLES
- rhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( rhs ) )
#-----------------------------------------------------------------------
def __rmul__( self, lhs ):
"""Scale a Duration by a value.
= INPUT VARIABLES
- lhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( lhs ) )
#-----------------------------------------------------------------------
def __div__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds / float( rhs ) )
#-----------------------------------------------------------------------
def __rdiv__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, float( rhs ) / self._seconds )
#-----------------------------------------------------------------------
def __str__( self ):
"""Print the Duration."""
return "%g %s" % ( self._seconds, self._frame )
#-----------------------------------------------------------------------
def __repr__( self ):
"""Print the Duration."""
return "Duration( '%s', %g )" % ( self._frame, self._seconds )
#-----------------------------------------------------------------------
def checkSameFrame( self, rhs, func ):
"""Check to see if frames are the same.
= ERROR CONDITIONS
- If the frame of the rhs Duration is not the same as our frame,
an error is thrown.
= INPUT VARIABLES
- rhs The Duration to check for the same frame
- func The name of the function doing the check.
"""
if self._frame != rhs._frame:
msg = "Cannot %s Duration's with different frames.\n" \
"LHS: %s\n" \
"RHS: %s" % ( func, self._frame, rhs._frame )
raise ValueError( msg )
#===========================================================================
| mit |
sumspr/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
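    # "auto" lets sample_without_replacement choose among the explicit strategies
    # (set-based tracking selection, reservoir sampling, or sampling from a full
    # pool of the population) based on the ratio n_samples / n_population.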
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
    # a large number of trials prevents false negatives without slowing the normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/cluster/plot_lena_segmentation.py | 8 | 2421 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
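# i.e. each edge weight becomes w_ij = exp(-beta * |I_i - I_j| / sigma) + eps, where
# |I_i - I_j| is the intensity gradient stored on the edge and sigma the image's
# standard deviation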
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
for l in range(N_REGIONS):
pl.contour(labels == l, contours=1,
colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
pl.xticks(())
pl.yticks(())
pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e339.py | 2 | 6317 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=4,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
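# With seq_length=512 and subsample_target=2, each training sequence has 512 input
# samples and 256 target samples per appliance; skip_probability presumably controls
# how often a window without appliance activations is skipped when drawing batches.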
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
250: 1e-3
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
NUM_FILTERS = 10
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N // 2,
'W': Normal(std=1/sqrt((N / 2) * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'W': Normal(std=1/sqrt(N / 2)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'W': Normal(std=1/sqrt(N / 2)),
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (source.n_seq_per_batch * (source.seq_length // 2),
source.n_inputs)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'W': Normal(std=1/sqrt(N / 2)),
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_io.py | 7 | 6574 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from pandas.compat import StringIO, u, long
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesToCSV(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = Series.from_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
self.assertTrue(ts.name is None)
self.assertTrue(ts.index.name is None)
# GH10483
self.ts.to_csv(path, header=True)
ts_h = Series.from_csv(path, header=0)
self.assertTrue(ts_h.name == 'ts')
self.series.to_csv(path)
series = Series.from_csv(path)
self.assertIsNone(series.name)
self.assertIsNone(series.index.name)
assert_series_equal(self.series, series, check_names=False)
self.assertTrue(series.name is None)
self.assertTrue(series.index.name is None)
self.series.to_csv(path, header=True)
series_h = Series.from_csv(path, header=0)
self.assertTrue(series_h.name == 'series')
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv(path, sep='|')
checkseries = Series({datetime(1998, 1, 1): 1.0,
datetime(1999, 1, 1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv(path, sep='|', parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
def test_to_csv(self):
import io
with ensure_clean() as path:
self.ts.to_csv(path)
with io.open(path, newline=None) as f:
lines = f.readlines()
assert (lines[1] != '\n')
self.ts.to_csv(path, index=False)
arr = np.loadtxt(path)
assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
s.to_csv(buf, encoding='UTF-8')
buf.seek(0)
s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')
assert_series_equal(s, s2)
def test_to_csv_float_format(self):
with ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format='%.2f')
rs = Series.from_csv(filename)
xp = Series([0.12, 0.23, 0.57])
assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(['jack and jill', 'jesse and frank'])
split = s.str.split(r'\s+and\s+')
buf = StringIO()
split.to_csv(buf)
def test_to_csv_path_is_none(self):
# GH 8215
# Series.to_csv() was returning None, inconsistent with
# DataFrame.to_csv() which returned string
s = Series([1, 2, 3])
csv_str = s.to_csv(path=None)
self.assertIsInstance(csv_str, str)
class TestSeriesIO(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_to_frame(self):
self.ts.name = None
rs = self.ts.to_frame()
xp = pd.DataFrame(self.ts.values, index=self.ts.index)
assert_frame_equal(rs, xp)
self.ts.name = 'testname'
rs = self.ts.to_frame()
xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
rs = self.ts.to_frame(name='testdifferent')
xp = pd.DataFrame(
dict(testdifferent=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
def test_to_dict(self):
self.assert_series_equal(Series(self.ts.to_dict(), name='ts'), self.ts)
def test_timeseries_periodindex(self):
# GH2891
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M')
def test_pickle_preserve_name(self):
for n in [777, 777., 'name', datetime(2001, 11, 11), (1, 2)]:
unpickled = self._pickle_roundtrip_name(tm.makeTimeSeries(name=n))
self.assertEqual(unpickled.name, n)
def _pickle_roundtrip_name(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_to_frame_expanddim(self):
# GH 9762
class SubclassedSeries(Series):
@property
def _constructor_expanddim(self):
return SubclassedFrame
class SubclassedFrame(DataFrame):
pass
s = SubclassedSeries([1, 2, 3], name='X')
result = s.to_frame()
self.assertTrue(isinstance(result, SubclassedFrame))
expected = SubclassedFrame({'X': [1, 2, 3]})
assert_frame_equal(result, expected)
class TestSeriesToList(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_tolist(self):
rs = self.ts.tolist()
xp = self.ts.values.tolist()
assert_almost_equal(rs, xp)
# datetime64
s = Series(self.ts.index)
rs = s.tolist()
self.assertEqual(self.ts.index[0], rs[0])
def test_tolist_np_int(self):
# GH10904
for t in ['int8', 'int16', 'int32', 'int64']:
s = pd.Series([1], dtype=t)
self.assertIsInstance(s.tolist()[0], (int, long))
def test_tolist_np_uint(self):
# GH10904
for t in ['uint8', 'uint16']:
s = pd.Series([1], dtype=t)
self.assertIsInstance(s.tolist()[0], int)
for t in ['uint32', 'uint64']:
s = pd.Series([1], dtype=t)
self.assertIsInstance(s.tolist()[0], long)
def test_tolist_np_float(self):
# GH10904
for t in ['float16', 'float32', 'float64']:
s = pd.Series([1], dtype=t)
self.assertIsInstance(s.tolist()[0], float)
| gpl-3.0 |
adamgreenhall/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
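# Usage sketch (assuming a nose/pytest-style runner that invokes this fixture):
#   SKLEARN_SEED=42 nosetests sklearn   # rerun the suite with a fixed, reproducible seed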
| bsd-3-clause |
jajcayn/pyclits | examples/4-wavelet_analysis.py | 1 | 2304 | """
Examples for pyCliTS -- https://github.com/jajcayn/pyclits
"""
# import modules
import pyclits as clt
from datetime import date
import matplotlib
# change for your favourite backend
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
# With the wavelet module, in addition to obtaining a single oscillatory component [as in example 2-oscillatory.py],
# we can also perform the classical wavelet analysis...
# let's analyse the periods in the NINO3.4 data
nino34 = clt.data_loaders.load_enso_index("example_data/nino34raw.txt", "3.4", start_date = date(1870, 1, 1),
end_date = date(2017, 7, 1), anom = False)
# for this we need to use wavelet module in its raw form
dt = 1./12 # in years -- it's monthly data, and we want the result to be in years
pad = True # recommended -- will pad the time series with 0 up to the length of power of 2
mother = clt.wavelet_analysis.morlet # mother wavelet
dj = 0.25 # this will do 4 sub-octaves per octave
s0 = 6 * dt # this will set first period at 6 months
j1 = 7 / dj # this says do 7 powers-of-two with dj sub-octaves each
k0 = 6. # default for Morlet mother
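# With these settings the scales run from s0 = 0.5 yr up to s0 * 2**(j1 * dj) = 64 yr
# in j1 + 1 = 29 logarithmic steps; for a Morlet wavelet with k0 = 6 the Fourier
# period is roughly 1.03 times the scale (Torrence & Compo, 1998)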
wave, period, scale, coi = clt.wavelet_analysis.continous_wavelet(nino34.data, dt = dt, pad = pad, wavelet = mother,
dj = dj, s0 = s0, j1 = j1, k0 = k0)
power = (np.abs(wave)) ** 2 # compute wavelet power spectrum
# set time array and levels to plot
time = np.arange(nino34.time.shape[0]) * dt + nino34.get_date_from_ndx(0).year
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
# plot wavelet
cs = plt.contourf(time, period, np.log2(power), len(levels))
im = plt.contourf(cs, levels = np.log2(levels))
# plot cone-of-influence
plt.plot(time, coi, 'k')
# set log scale and revert y-axis
plt.gca().set_yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.gca().invert_yaxis()
# set labels and title
plt.xlabel('time [year]')
plt.ylabel('period [years]')
plt.title('NINO3.4 wavelet power spectrum')
# colorbar
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("bottom", size="5%", pad=0.5)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.show() | mit |
ddna1021/spark | python/pyspark/worker.py | 5 | 12014 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.taskcontext import TaskContext
from pyspark.files import SparkFiles
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasSerializer
from pyspark.sql.types import to_arrow_type
from pyspark.util import _get_argspec
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
    # worker can be reused, so do not add path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
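# e.g. chain(f, g)(x) == g(f(x)); read_single_udf below uses this to collapse a stack
# of wrapped UDFs into a single callable applied left to right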
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_scalar_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_length(*a):
result = f(*a)
if not hasattr(result, "__len__"):
            raise TypeError("Return type of the user-defined function should be "
"Pandas.Series, but is {}".format(type(result)))
if len(result) != len(a[0]):
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (len(a[0]), len(result)))
return result
return lambda *a: (verify_result_length(*a), arrow_return_type)
def wrap_grouped_map_pandas_udf(f, return_type):
def wrapped(key_series, value_series):
import pandas as pd
argspec = _get_argspec(f)
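        # A one-argument UDF receives only the grouped data as a single DataFrame,
        # while a two-argument UDF also receives the grouping key as a tuple.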
if len(argspec.args) == 1:
result = f(pd.concat(value_series, axis=1))
elif len(argspec.args) == 2:
key = tuple(s[0] for s in key_series)
result = f(key, pd.concat(value_series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
arrow_return_types = (to_arrow_type(field.dataType) for field in return_type)
return [(result[result.columns[i]], arrow_type)
for i, arrow_type in enumerate(arrow_return_types)]
return wrapped
def wrap_grouped_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result])
return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
return arg_offsets, wrap_scalar_pandas_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
return arg_offsets, wrap_grouped_map_pandas_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
return arg_offsets, wrap_grouped_agg_pandas_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
return arg_offsets, wrap_udf(row_func, return_type)
else:
raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
num_udfs = read_int(infile)
udfs = {}
call_udf = []
mapper_str = ""
if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
# Create function like this:
# lambda a: f([a[0]], [a[0], a[1]])
# We assume there is only one UDF here because grouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
# See FlatMapGroupsInPandasExec for how arg_offsets are used to
# distinguish between grouping attributes and data attributes
arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
udfs['f'] = udf
split_offset = arg_offsets[0] + 1
arg0 = ["a[%d]" % o for o in arg_offsets[1: split_offset]]
arg1 = ["a[%d]" % o for o in arg_offsets[split_offset:]]
mapper_str = "lambda a: f([%s], [%s])" % (", ".join(arg0), ", ".join(arg1))
else:
# Create function like this:
# lambda a: (f0(a[0]), f1(a[1], a[2]), f2(a[3]))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF):
timezone = utf8_deserializer.loads(infile)
ser = ArrowStreamPandasSerializer(timezone)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
sys.exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# initialize global state
taskContext = TaskContext._getOrCreate()
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
num_broadcast_variables = read_int(infile)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
            # the JVM closed the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
sys.exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
java_port = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", java_port))
sock_file = sock.makefile("rwb", 65536)
main(sock_file, sock_file)
| apache-2.0 |
dikien/Machine-Learning-Newspaper | nytimes/step4_analysis_supervised_2.py | 1 | 4080 | # -*- coding: UTF-8 -*-
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge, ElasticNetCV
from sklearn.metrics import mean_squared_error, r2_score
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif, chi2
from sklearn.feature_selection import SelectKBest, f_regression
import pickle
from sklearn import preprocessing
from sklearn import cross_validation
import matplotlib.pyplot as plt
from itertools import cycle
def plot(nFeatures, data):
colors = cycle('rgbcmykw')
algorithm = sorted(data)
fig = plt.figure()
ax = fig.add_subplot(111)
for j, c in zip(algorithm, colors):
# for j in algorithm:
# plt.scatter(nFeatures, data[j], label=j)
ax.plot(nFeatures, data[j], label=j, color=c)
ax.scatter(nFeatures, data[j], color=c)
plt.xlabel("#-Features(SelectPercentile)")
plt.ylabel("Mean Squared Error")
plt.title("Mean Squared Error vs #-Features for different classifiers")
# ax.set_xscale("log")
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3,
box.width, box.height * 0.7])
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
# ax.set_ylim(0, 6.0)
plt.legend(loc=2)
plt.show()
def preprocess(article_file, lable_file, k):
features = pickle.load(open(article_file))
features = np.array(features)
# transform non-numerical labels (as long as they are hashable and comparable) to numerical labels
lables = pickle.load(open(lable_file))
le = preprocessing.LabelEncoder()
le.fit(lables)
lables = le.transform(lables)
# print le.inverse_transform([0])
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, min_df=1,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features)
# selector : SelectPercentile
selector = SelectPercentile(f_classif, percentile=k)
selector.fit(features_train_transformed, lables)
# selector : SelectKBest
# selector = SelectKBest(k=k)
# selector.fit(features_train_transformed, lables)
# selector : chi2
# selector = SelectPercentile(score_func=chi2)
# selector.fit(features_train_transformed, lables)
features_train_transformed = selector.transform(features_train_transformed).toarray()
return features_train_transformed, lables, vectorizer, selector, le, features
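# Hypothetical usage sketch (not part of the original script): scoring a new
# article with the objects returned by preprocess would replay the same
# transforms, e.g.
#     vec = vectorizer.transform(["some new article text"])
#     X_new = selector.transform(vec).toarray()
# and le.inverse_transform(pred) would map integer predictions back to the
# original label names.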
nFeatures = np.arange(10, 40, 10)
data = {}
for k in nFeatures:
features, labels, vectorizer, selector, le, features_data = preprocess("pkl/article_2_people.pkl", "pkl/lable_2_people.pkl", k)
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(features, labels, test_size=0.1, random_state=42)
for name, clf in [
('linear regression', LinearRegression(fit_intercept=True)),
# ('lasso()', Lasso()),
# ('elastic-net(.5)', ElasticNet(alpha=0.5)),
# ('lasso(.5)', Lasso(alpha=0.5)),
# ('ridge(.5)', Ridge(alpha=0.5))
]:
if not data.has_key(name):
data[name] = []
print "*" * 100
        print('Method: {}'.format(name) + ', number of features: {}'.format(k))
        # Fit on the training split:
        t0 = time()
        clf.fit(features_train, labels_train)
        print ("training time:", round(time()-t0, 3), "s")
        # Predict on the held-out test split:
        t1 = time()
        y_pred = clf.predict(features_test)
        print ("predicting time:", round(time()-t1, 3), "s")
        score_accuracy = mean_squared_error(labels_test, y_pred)
        print('r2 score : {}'.format(r2_score(labels_test, y_pred)))
print('mean squared error : {}'.format(score_accuracy))
print "*"* 100
data[name].append(score_accuracy)
plot(nFeatures, data)
print data | bsd-3-clause |
jayflo/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
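# Illustrative sketch (an assumption, not the actual sklearn/__init__.py) of
# how the package side can honour this flag:
#
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#     if __SKLEARN_SETUP__:
#         sys.stderr.write('Partial import of sklearn during the build process.\n')
#     else:
#         pass  # safe to import the compiled submodules here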
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
zhaozengguang/opencog | opencog/embodiment/Monitor/emotion_space_browser.py | 17 | 8987 | import numpy as np
import zmq
import json
import matplotlib as mpl
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
# configure the matplotlib settings
#mpl.rcParams['legend.fontsize'] = 10
from PyQt4 import QtGui, QtCore
from common import *
class ZmqMultiFiltersSubscriberThread(QThread):
data_update_signal = pyqtSignal(dict)
def __init__(self, widget, publish_endpoint, filter_list,
zmq_context = glb.zmq_context, parent = None):
"""
        The widget should contain a slot method like the one below:
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
# Some code here to process the data in json format
publish_endpoint tells the subscriber where the message source is
"""
# Initialize the thread
QThread.__init__(self)
# Connect the signal with the handler residing in widget
self.widget = widget
self.data_update_signal.connect(self.widget.handle_data_update)
# Initialize the ZeroMQ socket
self.socket = zmq_context.socket(zmq.SUB)
self.filter_list = filter_list
for filter_name in self.filter_list:
self.socket.setsockopt(zmq.SUBSCRIBE, filter_name)
self.socket.connect(publish_endpoint)
def run(self):
"""
Receive the message with matching filter_key from publish_endpoint
via ZeroMQ, discard the filter_key message and emit the signal to
corresponding handler with the actual data wrapped in python dictionary
"""
while True:
message = self.socket.recv()
# if the message contains only filter key, discard it
if message in self.filter_list:
self.latest_msg_filter = message
continue
# Unpack the message into python dictionary
json_dict = json.loads(message)
# Apply a filter name to this data dictionary, in order to distinguish it
json_dict['filter_key'] = self.latest_msg_filter
# Emit the signal which would evoke the corresponding handler
self.data_update_signal.emit(json_dict)
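# A minimal consumer sketch (hypothetical, for illustration only): any widget
# handed to ZmqMultiFiltersSubscriberThread is assumed to expose a slot such as
#
#     class MonitorWidget(QtGui.QWidget):
#         @pyqtSlot(dict)
#         def handle_data_update(self, json_dict):
#             print json_dict['filter_key'], json_dict
#
# so that the data_update_signal connection made in __init__ has a target.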
class EmotionSpace(FigureCanvas):
def __init__(self, publish_endpoint, parent=None, width=5, height=4, dpi=100):
# Initialize a cache for incoming data.
self.max_data_len = 25
## Feeling dictionary stores dominant feelings with different timestamps.
## Format: { timestamp -> dominant_feeling_name }
self.feeling_dict = {}
## Modulator dictionary caches modulators value at different time points.
## Format:
## { modulator_name -> { timestamp -> modulator_value } }
self.modulator_dict = {}
        # The modulator dictionary must be initialized in the format above
        # before data can be appended.
self.has_modulator_dict_initialized = False
# The legend list used to show legend in the chart
self.legend_list = []
# Chosen 3 modulators to be the axes of 3-dimensional space.
self.modulator_axes = []
self.axes_group_box = QtGui.QGroupBox("Modulator Axes:")
# Initialize variables related to graphics.
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = Axes3D(self.fig)
self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
FigureCanvas.updateGeometry(self)
# Create and start ZeroMQ subscriber threads
self.zmq_sub_thread = ZmqMultiFiltersSubscriberThread(self,
publish_endpoint,
[
"PsiFeelingUpdaterAgent",
"PsiModulatorUpdaterAgent"
]
)
self.zmq_sub_thread.start()
# Initialize modulator dictionary and legend list
def initialize_modulator_dict(self, json_dict):
timestamp = json_dict['timestamp']
del json_dict['timestamp']
for k, v in json_dict.iteritems():
self.modulator_dict[k] = {}
self.modulator_dict[k][timestamp] = v
self.modulator_axes.append(k)
self.has_modulator_dict_initialized = True
#self.initialize_axes_group()
def initialize_axes_group(self):
vLayout = QtGui.QVBoxLayout()
for axes in self.emotion_space.get_axes_list():
vLayout.addWidget(QtGui.QCheckBox(axes))
self.axes_group.setLayout(vLayout)
axesGroupSizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum, True)
self.axes_group.setSizePolicy(axesGroupSizePolicy)
self.layout().insert(self.axes_group, 0)
def update_data(self, json_dict):
'''
        Update the cached feeling and modulator data.
        Because the incoming message may carry either feeling data or
        modulator data, return a state code describing which cache was
        updated: 0 means the feeling dictionary was updated, 1 means the
        modulator dictionary was updated.
'''
if json_dict['filter_key'] == "PsiFeelingUpdaterAgent":
            # Feelings need no extra processing; just record the dominant one
del json_dict['filter_key']
timestamp = json_dict['timestamp']
del json_dict['timestamp']
# Get the feeling name with max value
dominant_feeling = max(json_dict, key = lambda k : json_dict.get(k))
# Cache the pair in the feeling dictionary
self.feeling_dict[timestamp] = dominant_feeling
# return state 0
return 0
elif json_dict['filter_key'] == "PsiModulatorUpdaterAgent":
# Remove filter key pair
del json_dict['filter_key']
if not self.has_modulator_dict_initialized:
self.initialize_modulator_dict(json_dict)
return
timestamp = json_dict['timestamp']
del json_dict['timestamp']
for k, v in json_dict.iteritems():
self.modulator_dict[k][timestamp] = v
# return state 1
return 1
else:
pass
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
"""
Process the data in json format
"""
update_state = self.update_data(json_dict)
# Only update the graphic when the widget is visible and
# modulator data has been updated.
if self.isVisible() and update_state == 1:
self.do_draw()
def do_draw(self):
self.axes.clear()
X = []
Y = []
Z = []
m = self.modulator_axes
print '=========='
for k, v in self.feeling_dict.iteritems():
X.append(self.modulator_dict[m[0]][k])
Y.append(self.modulator_dict[m[1]][k])
Z.append(self.modulator_dict[m[2]][k])
print str(self.modulator_dict[m[0]][k]) + ':' \
+ str(self.modulator_dict[m[1]][k]) + ':' \
+ str(self.modulator_dict[m[2]][k])
print '=========='
self.axes.grid(True)
self.axes.plot(X, Y, Z, '-o')
self.draw()
def get_axes_list(self):
return self.modulator_axes
class EmotionSpaceExplorer(QtGui.QWidget):
def __init__(self, parent=None):
super(EmotionSpaceExplorer, self).__init__(parent)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding, True)
sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
self.setSizePolicy(sizePolicy)
self.emotion_space = EmotionSpace("tcp://192.168.1.250:18002", self)
self.navigation_toolbar = NavigationToolbar(self.emotion_space, self)
mainLayout = QtGui.QVBoxLayout(self)
mainLayout.addWidget(self.emotion_space)
mainLayout.addWidget(self.navigation_toolbar)
self.setLayout(mainLayout)
| agpl-3.0 |