repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
evidation-health/ContinuousTimeMarkovModel | data/claimsToInputs.py | 1 | 7673 | import pandas as pd
import datetime
import numpy as np
from scipy.linalg import expm
import os
from pickle import dump
import re
import argparse
pd.options.mode.chained_assignment = None
#Set up inputs
parser = argparse.ArgumentParser(description='Convert claims into inputs for the Sontag disease progression model.')
parser.add_argument(action='store', default='new_sample/', type=str, dest = 'claimsfile',
help='claims csv file to read in')
parser.add_argument('-o','--outdir', action='store', default='new_sample/', type=str, dest = 'outdir',
help='directory to output data')
parser.add_argument('-p','--paramdir', action='store', default=None, type=str, dest = 'paramdir',
help='directory to grab parameter initializations from')
parser.add_argument('-a','--anchorsdir', action='store', default=None, type=str, dest = 'anchorsdir',
help='directory to grab anchors from if not specifying paramdir')
parser.add_argument('-t','--timeperiod', action='store', default=90, type=int, dest = 'timeperiod',
help='number of days per time period')
parser.add_argument('-c','--maxclaims', action='store', default=None, type=int, dest = 'maxclaims',
help='maximum number of (most frequent) diagnosis codes to keep')
parser.add_argument('-s','--minsteps', action='store', default=3, type=int, dest = 'minsteps',
help='minimum number of active time periods')
parser.add_argument('--seed', action='store', default=111, type=int, dest = 'randomseed',
help='random seed for sampling')
parser.add_argument('--kcomorbid', action='store', default=4, type=int, dest = 'K',
help='specify K if not specifying paramdir')
parser.add_argument('--mstates', action='store', default=4, type=int, dest = 'M',
help='specify M if not specifying paramdir')
#parser.add_argument('-p','--profile', action='store_true', dest = 'profile',
# help='turns on theano profiler')
args = parser.parse_args()
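# Example invocation (hypothetical file name; the positional claims csv is the
# only required argument, all flags below have defaults):
#   python claimsToInputs.py claims.csv -o new_sample/ -t 90 -s 3 --seed 111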
def loadAnchors(dataDirectory):
icd9Map = {}
with open(dataDirectory+'/fid.txt') as mapFile:
for i,icd9 in enumerate(mapFile):
icd9Map[icd9.strip()] = i
mapFile.close()
#print icd9Map
comorbidityNames = []
anchors = []
with open(dataDirectory+'/anchor_icd9.csv') as anchorFile:
for i,line in enumerate(anchorFile):
text = line.strip().split(',')
comorbidityNames.append(text[0])
comorbAnchors = []
for codeStr in text[1:]:
for key in icd9Map.keys():
l = re.search(codeStr,key)
if l is not None:
comorbAnchors.append(icd9Map[l.group(0)])
anchors.append((i,comorbAnchors))
anchorFile.close()
return anchors,comorbidityNames
claimsDF = pd.read_csv(args.claimsfile,index_col=0,parse_dates=['date_of_service'])
claimsDF.date_of_service = claimsDF.date_of_service.astype(np.datetime64)
if args.maxclaims is not None:
fid = claimsDF.primary_diag_cd.value_counts()[0:args.maxclaims].index.values
claimsDF.primary_diag_cd = claimsDF.primary_diag_cd.apply(lambda x: x if x in fid else np.nan)
claimsDF.dropna(inplace=True)
else:
fid = claimsDF.primary_diag_cd.unique()
D = len(fid)
tstepClaims = []
for user in claimsDF.groupby('pers_uniq_id'):
user[1].date_of_service = (user[1].date_of_service-user[1].date_of_service.min())/pd.Timedelta('1 days')
# user[1].date_of_service.max()/
nbins = np.ceil(user[1].date_of_service.max()/args.timeperiod)
bins = np.arange(0,(nbins+1)*args.timeperiod,args.timeperiod)
user[1].loc[:,'timeperiod'] = pd.cut(user[1].loc[:,'date_of_service'], bins, include_lowest=True,labels = range(int(nbins)))
user[1].loc[:,'timeperiod'] = user[1].loc[:,'timeperiod'].dropna().astype(int)
tstepClaims.append(user[1][['pers_uniq_id','timeperiod','primary_diag_cd']].drop_duplicates())
finalClaims = pd.concat(tstepClaims)
finalClaims = finalClaims.dropna()
fidDict = {}
for i,icd9 in enumerate(fid):
fidDict[icd9] = i
finalClaims.loc[:,'primary_diag_cd'] = finalClaims.primary_diag_cd.apply(lambda x: fidDict[x])
finalClaims = finalClaims.groupby(['pers_uniq_id'],as_index=False).apply(lambda x: x if x.timeperiod.nunique()>=args.minsteps else None).reset_index(drop=True)
Dmax = finalClaims.groupby(['pers_uniq_id','timeperiod']).count().max()[0]
T = finalClaims.groupby(['pers_uniq_id']).timeperiod.nunique().values
nObs = T.sum()
N = len(T)
zeroIndices = np.roll(T.cumsum(),1)
zeroIndices[0] = 0
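# zeroIndices[n] is the row index of person n's first observation in the flattened arrays below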
O = np.ones((nObs,Dmax),dtype=int)*-1
obs_jumps = np.zeros((nObs),dtype=int)
counter = 0
prevTime = 0
for group in finalClaims.groupby(['pers_uniq_id','timeperiod']):
for i,val in enumerate(group[1].primary_diag_cd):
O[counter,i]=val
curTime = group[1].timeperiod.values[0]
obs_jumps[counter] = curTime-prevTime
prevTime = curTime
counter += 1
obs_jumps[zeroIndices] = 0
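# obs_jumps[i] holds the gap (in time periods) since that person's previous
# observation; the first observation of each person is forced to a gap of 0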
if args.paramdir is not None:
dataDirectory = args.paramdir
Q = np.loadtxt(dataDirectory+'/Q.txt')
pi = np.loadtxt(dataDirectory+'/pi.txt')
B0 = np.loadtxt(dataDirectory+'/piB.txt')
B = np.loadtxt(dataDirectory+'/B.txt')
Z = np.loadtxt(dataDirectory+'/Z.txt')
L = np.loadtxt(dataDirectory+'/L.txt')
anchors,comorbidityNames = loadAnchors(dataDirectory)
M = pi.shape[0]
K = Z.shape[0]
else:
#DES Random inputs
K = args.K
M = args.M
ranSeed = args.randomseed
np.random.seed(ranSeed)
L = np.random.rand(D)*0.3
np.random.seed(ranSeed+1)
Z = np.random.rand(K,D)
np.random.seed(ranSeed+2)
B = np.random.rand(K,M)
np.random.seed(ranSeed+3)
B0 = np.random.rand(K,M)
B0.sort(axis=1)
np.random.seed(ranSeed+4)
pi = np.random.rand(M)*(1-M*0.001)+0.001*M
pi = pi/pi.sum()
pi[::-1].sort()
np.random.seed(ranSeed+5)
Qvals = np.random.rand(M-1)
Q = np.zeros((M,M))
for i,val in enumerate(Qvals):
Q[i,i+1] = val
Q[i,i] = -val
if args.anchorsdir is not None:
anchors,comorbidityNames = loadAnchors(args.anchorsdir)
else:
anchors = []
comorbidityNames = []
jumpInd = {}
transMat = []
for i,jump in enumerate(np.unique(obs_jumps)[1:]):
jumpInd[jump] = i
transMat.append(expm(jump*Q))
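# each transMat entry is the transition-probability matrix of the continuous-time
# Markov chain over one of the distinct nonzero gaps: expm(jump*Q) = e^(Q*jump)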
#Generate S from parameters
S = np.zeros(nObs,dtype=np.int32)
S[zeroIndices] = np.random.choice(np.arange(M),size=(N),p=pi)
for n in range(N):
n0 = zeroIndices[n]
for t in range(1,T[n]):
S[n0+t] = np.random.choice(np.arange(M),p=transMat[jumpInd[obs_jumps[n0+t]]][S[n0+t-1]])
#Generate X from parameters
X = np.zeros((nObs,K))
X[zeroIndices] = np.random.binomial(n=1,p=B0[:,S[zeroIndices]].T)
for k in range(K):
for n in range(N):
n0 = zeroIndices[n]
if X[n0,k] == 1:
X[zeroIndices[n]:(zeroIndices[n]+T[n]),k] = 1
else:
changed = np.diff(S[zeroIndices[n]:(zeroIndices[n]+T[n])])
for t in range(1,T[n]):
if changed[t-1]==1 and np.random.rand()<B[k,S[n0+t]]:
X[(n0+t):(zeroIndices[n]+T[n]),k] = 1
break
X = X.astype(np.int8)
#Write pickled files
variables = [Q,pi,S,T,obs_jumps,B0,B,X,Z,L,O,anchors,comorbidityNames]
names = ['Q','pi','S','T','obs_jumps','B0','B','X','Z','L','O','anchors','comorbidityNames']
if not os.path.isdir(args.outdir):
os.mkdir(args.outdir)
for var,name in zip(variables,names):
outfile = open(args.outdir+'/'+name+'.pkl','wb')
dump(var,outfile)
outfile.close()
| mit |
hoenirvili/distributions | distributions/poisson.py | 1 | 2592 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson
from .distribution import Distribution
__all__ = ['Poisson']
class Poisson(Distribution):
"""
A random variable X that has a Poisson distribution represents
the number of events occurring in a fixed time interval with a
rate parameter λ. λ tells you the rate at which events occur;
the mean and the variance are both λ, and P(X = k) = exp(-λ) λ^k / k!.
Parameters:
-----------
n : int
Number of events
rate : int
The rate at which events occur
"""
def __init__(self, n, rate):
if (type(n) != int or n < 0 or n is None):
raise ValueError("Invalid number of events")
if (type(rate) != int or
rate < 0 or rate is None):
raise ValueError("Invalid rate number")
self.__rate = rate
self.__n = n
self.__all_n = np.arange(0, n)
def mean(self):
"""
Compute the mean of the distribution
Returns:
--------
mean : float
"""
return poisson.mean(self.__rate)
def variance(self):
"""
Compute the variance of the distribution
Returns:
--------
variance : float
"""
return poisson.var(self.__rate)
def std(self):
"""
Compute the standard deviation of the distribution.
Returns:
--------
std : float
"""
return poisson.std(self.__rate)
def pmf(self):
"""
Compute the probability mass function of the distribution
Returns:
--------
pmf : float
"""
return poisson.pmf(self.__n, self.__rate)
def pmfs(self):
"""
Compute the probability mass function for every value from 0 to n-1
Returns:
--------
pmf : numpy.narray
"""
return poisson.pmf(self.__all_n, self.__rate)
def cdf(self):
"""
Compute the cumulative distribution function.
Returns:
--------
cdf : float
"""
return poisson.cdf(self.__n, self.__rate)
def plot(self):
"""Plot values pmfs values of the distribution"""
pmfs = self.pmfs()
plt.plot(self.__all_n, pmfs, 'o-')
plt.title(r'Poisson: $\lambda$ = %i' % self.__rate)
plt.xlabel('Number of accidents')
plt.ylabel('Probability of number of accidents')
plt.show()
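# Usage sketch (illustrative only; assumes the class is importable as
# ``from distributions.poisson import Poisson``):
#
#   p = Poisson(n=10, rate=3)
#   p.mean(), p.variance(), p.std()   # 3.0, 3.0, ~1.732
#   p.pmf()                           # P(X = 10) with rate 3
#   p.plot()                          # pmf curve for k = 0 .. n-1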
| mit |
nohzen/DL | ch04/train_neuralnet.py | 1 | 1781 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting so that files in the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 10000  # set the number of iterations as appropriate
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
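# one epoch = train_size / batch_size iterations (600 for the 60,000-image MNIST training set)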
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# Compute the gradient
#grad = network.numerical_gradient(x_batch, t_batch)
grad = network.gradient(x_batch, t_batch)
# Update the parameters
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show() | mit |
jmmease/pandas | pandas/io/json/normalize.py | 3 | 9206 | # ---------------------------------------------------------------------
# JSON normalization routines
import copy
from collections import defaultdict
import numpy as np
from pandas._libs.lib import convert_json_to_lines
from pandas import compat, DataFrame
def _convert_to_line_delimits(s):
"""Helper function that converts json lists to line delimited json."""
# Determine whether we have a JSON list to turn into lines; otherwise just
# return the json object as-is (only lists can be converted)
if not (s[0] == '[' and s[-1] == ']'):
return s
s = s[1:-1]
return convert_json_to_lines(s)
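# Illustrative example of the conversion above (approximate):
#   '[{"a": 1},{"b": 2}]'  ->  '{"a": 1}\n{"b": 2}'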
def nested_to_record(ds, prefix="", sep=".", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
level: the number of levels in the json string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, compat.string_types):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and len(data) == 0:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise \
KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
result[k] = np.array(v).repeat(lengths)
return result
| bsd-3-clause |
orionzhou/robin | formats/excel.py | 1 | 7767 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Read and write EXCEL file.
Library dependency: pandas
"""
import os.path as op
import sys
import logging
import pandas as pd
from jcvi.apps.base import sh, mkdir
class ColorMatcher(object):
def __init__(self):
self.reset()
def reset(self):
self.unused_colors = set(self.xlwt_colors)
# Never use black.
self.unused_colors.discard((0, 0, 0))
# Culled from a table at http://www.mvps.org/dmcritchie/excel/colors.htm
xlwt_colors=[
(0,0,0), (255,255,255), (255,0,0), (0,255,0), (0,0,255), (255,255,0),
(255,0,255), (0,255,255), (0,0,0), (255,255,255), (255,0,0), (0,255,0),
(0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0),
(0,0,128), (128,128,0), (128,0,128), (0,128,128), (192,192,192),
(128,128,128), (153,153,255), (153,51,102), (255,255,204),
(204,255,255), (102,0,102), (255,128,128), (0,102,204), (204,204,255),
(0,0,128), (255,0,255), (255,255,0), (0,255,255), (128,0,128),
(128,0,0), (0,128,128), (0,0,255), (0,204,255), (204,255,255),
(204,255,204), (255,255,153), (153,204,255), (255,153,204),
(204,153,255), (255,204,153), (51,102,255), (51,204,204), (153,204,0),
(255,204,0), (255,153,0), (255,102,0), (102,102,153), (150,150,150),
(0,51,102), (51,153,102), (0,51,0), (51,51,0), (153,51,0), (153,51,102),
(51,51,153), (51,51,51)
]
@staticmethod
def color_distance(rgb1, rgb2):
# Adapted from Colour metric by Thiadmer Riemersma,
# http://www.compuphase.com/cmetric.htm
rmean = (rgb1[0] + rgb2[0]) / 2
r = rgb1[0] - rgb2[0]
g = rgb1[1] - rgb2[1]
b = rgb1[2] - rgb2[2]
return (((512 + rmean) * r * r) / 256) + 4 * g * g \
+ (((767 - rmean) * b * b) / 256)
def match_color_index(self, color):
"""Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
"""
from maize.utils.webcolors import color_diff
if isinstance(color, int):
return color
if color:
if isinstance(color, str):
rgb = list(map(int, color.split(',')))
else:
rgb = color.Get()
logging.disable(logging.DEBUG)
distances = [color_diff(rgb, x) for x in self.xlwt_colors]
logging.disable(logging.NOTSET)
result = distances.index(min(distances))
self.unused_colors.discard(self.xlwt_colors[result])
return result
def get_unused_color(self):
"""Returns an xlwt color index that has not been previously returned by
this instance. Attempts to maximize the distance between the color and
all previously used colors.
"""
if not self.unused_colors:
# If we somehow run out of colors, reset the color matcher.
self.reset()
used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors]
result_color = max(self.unused_colors,
key=lambda c: min(self.color_distance(c, c2)
for c2 in used_colors))
result_index = self.xlwt_colors.index(result_color)
self.unused_colors.discard(result_color)
return result_index
def main():
import argparse
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'read and write excel files'
)
sp = parser.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser('csv', help='Convert EXCEL to csv file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('fi', help = 'input excel file')
sp1.add_argument('fo', help = 'output csv file')
sp1.add_argument('--sheet', default='', help='worksheet to use')
sp1.add_argument('--sep', default=',', help='separator')
sp1.set_defaults(func = csv)
sp1 = sp.add_parser('tsv', help='Convert EXCEL to tsv file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('fi', help = 'input excel file')
sp1.add_argument('fo', help = 'output tsv file')
sp1.add_argument('--sheet', default='', help='worksheet to use')
sp1.add_argument('--sep', default='\t', help='separator')
sp1.set_defaults(func = csv)
sp1 = sp.add_parser('tsvs', help='Convert all worksheets in EXCEL to tsv files',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('excel', help = 'input excel file')
sp1.add_argument('--outdir', default='sheets', help='output directory')
sp1.add_argument('--sep', default='\t', help='separator')
sp1.set_defaults(func = tsvs)
sp1 = sp.add_parser('fromcsv', help='Convert csv file to EXCEL',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('csv', help = 'input csv file')
sp1.add_argument("--noheader", action="store_true",
help="Do not treat the first row as header")
sp1.add_argument("--rgb", default=-1, type=int,
help="Show RGB color box")
sp1.add_argument('--sep', default=',', help='separator used in the csv file')
sp1.set_defaults(func = fromcsv)
args = parser.parse_args()
if args.command:
args.func(args)
else:
print('Error: need to specify a sub command\n')
parser.print_help()
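# Example invocations (hypothetical file names), one per sub-command:
#   python excel.py csv book.xlsx book.csv --sheet Sheet1
#   python excel.py tsv book.xlsx book.tsv
#   python excel.py tsvs book.xlsx --outdir sheets
#   python excel.py fromcsv book.csv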
def fromcsv(args):
"""
%prog fromcsv csvfile
Convert csv file to EXCEL.
"""
from csv import reader
from xlwt import Workbook, easyxf
from maize.formats.base import flexible_cast
csvfile = args.csv
header = not args.noheader
rgb = args.rgb
excelfile = csvfile.rsplit(".", 1)[0] + ".xls"
data = []
for row in reader(open(csvfile), delimiter=args.sep):
data.append(row)
w = Workbook()
s = w.add_sheet(op.basename(csvfile))
header_style = easyxf('font: bold on')
if header:
s.panes_frozen = True
s.horz_split_pos = 1
cm = ColorMatcher()
for i, row in enumerate(data):
for j, cell in enumerate(row):
cell = flexible_cast(cell)
if header and i == 0:
s.write(i, j, cell, header_style)
else:
if j == rgb:
cix = cm.match_color_index(cell)
color_style = easyxf('font: color_index {0}'.format(cix))
s.write(i, j, cell, color_style)
else:
s.write(i, j, cell)
w.save(excelfile)
logging.debug("File written to `{0}`.".format(excelfile))
return excelfile
def csv(args):
"""
%prog csv excelfile
Convert EXCEL to csv file.
"""
import pandas as pd
fi = args.fi
fo = args.fo
sep = args.sep
sheet = 0 if args.sheet == '' else args.sheet
#suf = '.tsv' if sep == '\t' else '.csv'
#fo = fi.rsplit(".", 1)[0] + suf
df = pd.read_excel(fi, sheet_name=sheet, header=0, convert_float=True)
df.to_csv(fo, sep=sep, header=True, index=False)
def tsvs(args):
"""
%prog tsvs excelfile
Convert all worksheets in EXCEL to tsv files.
"""
excelfile = args.excel
odir = args.outdir
sep = args.sep
xl = pd.ExcelFile(excelfile)
sheets = xl.sheet_names
print("will convert %d sheets under %s" % (len(sheets), odir))
mkdir(odir)
suf = '.tsv' if sep == '\t' else '.csv'
for sheet in sheets:
fo = "%s/%s%s" % (odir, sheet, suf)
print(" writing %s" % fo)
df = pd.read_excel(excelfile, sheet_name=sheet, header=0)
df.to_csv(fo, sep=sep, header=True, index=False)
if __name__ == '__main__':
main()
| gpl-2.0 |
rseubert/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
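# xx is the proportion of data used for training (complement of the held-out fraction)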
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
akrherz/iem | htdocs/mec/turbine_ts.py | 1 | 3993 | """Plot 2 day Timeseries from one Turbine"""
from io import BytesIO
import datetime
import pytz
from pandas.io.sql import read_sql
import matplotlib.dates as mdates
from paste.request import parse_formvars
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn
def workflow(turbinename, ts):
"""Go main()"""
pgconn = get_dbconn("mec", user="mesonet")
cursor = pgconn.cursor()
cursor.execute(
"""SELECT unitnumber from turbines
where turbinename = %s""",
(turbinename,),
)
unitnumber = cursor.fetchone()[0]
ts1 = ts.strftime("%Y-%m-%d")
ts2 = (ts + datetime.timedelta(hours=73)).strftime("%Y-%m-%d")
df = read_sql(
"""
select coalesce(s.valid, d.valid) as stamp,
s.power as s_power, s.pitch as s_pitch,
s.yaw as s_yaw, s.windspeed as s_windspeed,
d.power as d_power, d.pitch as d_pitch,
d.yaw as d_yaw, d.windspeed as d_windspeed
from sampled_data_"""
+ unitnumber
+ """ s FULL OUTER JOIN
turbine_data_"""
+ unitnumber
+ """ d
on (d.valid = s.valid)
WHERE s.valid BETWEEN %s and %s
ORDER by stamp ASC
""",
pgconn,
params=[ts1, ts2],
)
(_, ax) = plt.subplots(4, 1, sharex=True, figsize=(8, 11))
ax[0].set_title("%s - %s Plot for Turbine: %s" % (ts1, ts2, turbinename))
ax[0].bar(
df["stamp"],
df["s_power"],
width=1.0 / 1440.0,
fc="tan",
ec="tan",
zorder=1,
label="1 Minute Sampled",
)
data = df[df["d_power"] > -10]
ax[0].scatter(
data["stamp"].values,
data["d_power"].values,
zorder=2,
marker="+",
s=40,
label="Observations",
)
ax[0].set_ylim(-50, 1600)
ax[0].legend(loc=(0.0, -0.2), ncol=2)
ax[0].set_ylabel("Power kW")
ax[0].grid(True)
# --------------------------------------------------------
ax[1].bar(
df["stamp"],
df["s_pitch"],
width=1.0 / 1440.0,
fc="tan",
ec="tan",
zorder=1,
)
data = df[df["d_pitch"] > -10]
ax[1].scatter(
data["stamp"].values,
data["d_pitch"].values,
zorder=2,
marker="+",
s=40,
)
ax[1].set_ylim(bottom=-5)
ax[1].set_ylabel("Pitch $^\circ$")
ax[1].grid(True)
# --------------------------------------------------------
ax[2].bar(
df["stamp"],
df["s_yaw"],
width=1.0 / 1440.0,
fc="tan",
ec="tan",
zorder=1,
)
data = df[df["d_yaw"] > -10]
ax[2].scatter(
data["stamp"].values, data["d_yaw"].values, zorder=2, marker="+", s=40
)
ax[2].text(0.05, -0.1, "* Uncorrected Yaw", transform=ax[2].transAxes)
ax[2].set_ylim(0, 360)
ax[2].set_yticks([0, 90, 180, 270, 360])
ax[2].set_yticklabels(["N", "E", "S", "W", "N"])
ax[2].grid(True)
ax[2].set_ylabel("Turbine Yaw")
# -----------------------------------------------------------
ax[3].bar(
df["stamp"],
df["s_windspeed"],
width=1.0 / 1440.0,
fc="tan",
ec="tan",
zorder=1,
)
data = df[df["d_windspeed"] > -10]
ax[3].scatter(
data["stamp"].values,
data["d_windspeed"].values,
zorder=2,
marker="+",
s=40,
)
ax[3].grid(True)
ax[3].set_ylabel("Wind Speed mps")
ax[3].set_ylim(bottom=0)
ax[3].xaxis.set_major_formatter(
mdates.DateFormatter(
"%-I %p\n%-d/%b", tz=pytz.timezone("America/Chicago")
)
)
def application(environ, start_response):
"""Go Main Go"""
headers = [("Content-type", "image/png")]
start_response("200 OK", headers)
form = parse_formvars(environ)
turbinename = form.get("turbinename", "I 050-350")
ts = datetime.datetime.strptime(form.get("date", "20100401"), "%Y%m%d")
workflow(turbinename, ts)
bio = BytesIO()
plt.savefig(bio)
return [bio.getvalue()]
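# Example request (hypothetical URL path), handled by ``application`` above:
#   GET /mec/turbine_ts.py?turbinename=I%20050-350&date=20100401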
| mit |
zfrenchee/pandas | pandas/tests/util/test_testing.py | 1 | 29041 | # -*- coding: utf-8 -*-
import pandas as pd
import pytest
import numpy as np
import sys
from pandas import Series, DataFrame
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import (assert_almost_equal, raise_with_traceback,
assert_index_equal, assert_series_equal,
assert_frame_equal, assert_numpy_array_equal,
RNGContext)
class TestAssertAlmostEqual(object):
def _assert_almost_equal_both(self, a, b, **kwargs):
assert_almost_equal(a, b, **kwargs)
assert_almost_equal(b, a, **kwargs)
def _assert_not_almost_equal_both(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_almost_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_almost_equal, b, a, **kwargs)
def test_assert_almost_equal_numbers(self):
self._assert_almost_equal_both(1.1, 1.1)
self._assert_almost_equal_both(1.1, 1.100001)
self._assert_almost_equal_both(np.int16(1), 1.000001)
self._assert_almost_equal_both(np.float64(1.1), 1.1)
self._assert_almost_equal_both(np.uint32(5), 5)
self._assert_not_almost_equal_both(1.1, 1)
self._assert_not_almost_equal_both(1.1, True)
self._assert_not_almost_equal_both(1, 2)
self._assert_not_almost_equal_both(1.0001, np.int16(1))
def test_assert_almost_equal_numbers_with_zeros(self):
self._assert_almost_equal_both(0, 0)
self._assert_almost_equal_both(0, 0.0)
self._assert_almost_equal_both(0, np.float64(0))
self._assert_almost_equal_both(0.000001, 0)
self._assert_not_almost_equal_both(0.001, 0)
self._assert_not_almost_equal_both(1, 0)
def test_assert_almost_equal_numbers_with_mixed(self):
self._assert_not_almost_equal_both(1, 'abc')
self._assert_not_almost_equal_both(1, [1, ])
self._assert_not_almost_equal_both(1, object())
@pytest.mark.parametrize(
"left_dtype",
['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])
@pytest.mark.parametrize(
"right_dtype",
['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])
def test_assert_almost_equal_edge_case_ndarrays(
self, left_dtype, right_dtype):
# empty compare
self._assert_almost_equal_both(np.array([], dtype=left_dtype),
np.array([], dtype=right_dtype),
check_dtype=False)
def test_assert_almost_equal_dicts(self):
self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
self._assert_not_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
self._assert_not_almost_equal_both({'a': 1, 'b': 2},
{'a': 1, 'b': 2, 'c': 3})
self._assert_not_almost_equal_both({'a': 1}, 1)
self._assert_not_almost_equal_both({'a': 1}, 'abc')
self._assert_not_almost_equal_both({'a': 1}, [1, ])
def test_assert_almost_equal_dict_like_object(self):
class DictLikeObj(object):
def keys(self):
return ('a', )
def __getitem__(self, item):
if item == 'a':
return 1
self._assert_almost_equal_both({'a': 1}, DictLikeObj(),
check_dtype=False)
self._assert_not_almost_equal_both({'a': 2}, DictLikeObj(),
check_dtype=False)
def test_assert_almost_equal_strings(self):
self._assert_almost_equal_both('abc', 'abc')
self._assert_not_almost_equal_both('abc', 'abcd')
self._assert_not_almost_equal_both('abc', 'abd')
self._assert_not_almost_equal_both('abc', 1)
self._assert_not_almost_equal_both('abc', [1, ])
def test_assert_almost_equal_iterables(self):
self._assert_almost_equal_both([1, 2, 3], [1, 2, 3])
self._assert_almost_equal_both(np.array([1, 2, 3]),
np.array([1, 2, 3]))
# class / dtype are different
self._assert_not_almost_equal_both(np.array([1, 2, 3]), [1, 2, 3])
self._assert_not_almost_equal_both(np.array([1, 2, 3]),
np.array([1., 2., 3.]))
# Can't compare generators
self._assert_not_almost_equal_both(iter([1, 2, 3]), [1, 2, 3])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 4])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 3, 4])
self._assert_not_almost_equal_both([1, 2, 3], 1)
def test_assert_almost_equal_null(self):
self._assert_almost_equal_both(None, None)
self._assert_not_almost_equal_both(None, np.NaN)
self._assert_not_almost_equal_both(None, 0)
self._assert_not_almost_equal_both(np.NaN, 0)
def test_assert_almost_equal_inf(self):
self._assert_almost_equal_both(np.inf, np.inf)
self._assert_almost_equal_both(np.inf, float("inf"))
self._assert_not_almost_equal_both(np.inf, 0)
self._assert_almost_equal_both(np.array([np.inf, np.nan, -np.inf]),
np.array([np.inf, np.nan, -np.inf]))
self._assert_almost_equal_both(np.array([np.inf, None, -np.inf],
dtype=np.object_),
np.array([np.inf, np.nan, -np.inf],
dtype=np.object_))
def test_assert_almost_equal_pandas(self):
tm.assert_almost_equal(pd.Index([1., 1.1]),
pd.Index([1., 1.100001]))
tm.assert_almost_equal(pd.Series([1., 1.1]),
pd.Series([1., 1.100001]))
tm.assert_almost_equal(pd.DataFrame({'a': [1., 1.1]}),
pd.DataFrame({'a': [1., 1.100001]}))
def test_assert_almost_equal_object(self):
a = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]
b = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]
self._assert_almost_equal_both(a, b)
class TestUtilTesting(object):
def test_raise_with_traceback(self):
with tm.assert_raises_regex(LookupError, "error_text"):
try:
raise ValueError("THIS IS AN ERROR")
except ValueError as e:
e = LookupError("error_text")
raise_with_traceback(e)
with tm.assert_raises_regex(LookupError, "error_text"):
try:
raise ValueError("This is another error")
except ValueError:
e = LookupError("error_text")
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
class TestAssertNumpyArrayEqual(object):
@td.skip_if_windows
def test_numpy_array_equal_message(self):
expected = """numpy array are different
numpy array shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]))
# scalar comparison
expected = """Expected type """
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(1, 2)
expected = """expected 2\\.00000 but got 1\\.00000, with decimal 5"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(1, 2)
# array / scalar array comparison
expected = """numpy array are different
numpy array classes are different
\\[left\\]: ndarray
\\[right\\]: int"""
with tm.assert_raises_regex(AssertionError, expected):
# numpy_array_equal only accepts np.ndarray
assert_numpy_array_equal(np.array([1]), 1)
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1]), 1)
# scalar / array comparison
expected = """numpy array are different
numpy array classes are different
\\[left\\]: int
\\[right\\]: ndarray"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(1, np.array([1]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(1, np.array([1]))
expected = """numpy array are different
numpy array values are different \\(66\\.66667 %\\)
\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([np.nan, 2, 3]),
np.array([1, np.nan, 3]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([np.nan, 2, 3]),
np.array([1, np.nan, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([1, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1\\.1, 2\\.000001\\]
\\[right\\]: \\[1\\.1, 2.0\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(
np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
# must pass
assert_almost_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
expected = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
expected = """numpy array are different
numpy array values are different \\(25\\.0 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
# allow to overwrite message
expected = """Index are different
Index shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
@td.skip_if_windows
def test_numpy_array_equal_object_message(self):
a = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')])
b = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')])
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, b)
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(a, b)
def test_numpy_array_equal_copy_flag(self):
a = np.array([1, 2, 3])
b = a.copy()
c = a.view()
expected = r'array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)'
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, b, check_same='same')
expected = r'array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)'
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, c, check_same='copy')
def test_assert_almost_equal_iterable_message(self):
expected = """Iterable are different
Iterable length are different
\\[left\\]: 2
\\[right\\]: 3"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal([1, 2], [3, 4, 5])
expected = """Iterable are different
Iterable values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal([1, 2], [1, 3])
class TestAssertIndexEqual(object):
def test_index_equal_message(self):
expected = """Index are different
Index levels are different
\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\],
labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=False)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),
('B', 3), ('B', 4)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index length are different
\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3, 4])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index classes are different
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3.0])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0000000001])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
# must succeed
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0001])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
# must succeed
assert_index_equal(idx1, idx2, check_exact=False,
check_less_precise=True)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 4])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_less_precise=True)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),
('B', 3), ('B', 4)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
def test_index_equal_metadata_message(self):
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[None\\]
\\[right\\]: \\[u?'x'\\]"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3], name='x')
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
# same name, should pass
assert_index_equal(pd.Index([1, 2, 3], name=np.nan),
pd.Index([1, 2, 3], name=np.nan))
assert_index_equal(pd.Index([1, 2, 3], name=pd.NaT),
pd.Index([1, 2, 3], name=pd.NaT))
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[nan\\]
\\[right\\]: \\[NaT\\]"""
idx1 = pd.Index([1, 2, 3], name=np.nan)
idx2 = pd.Index([1, 2, 3], name=pd.NaT)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
class TestAssertSeriesEqual(object):
def _assert_equal(self, x, y, **kwargs):
assert_series_equal(x, y, **kwargs)
assert_series_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_series_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_series_equal, b, a, **kwargs)
def test_equal(self):
self._assert_equal(Series(range(3)), Series(range(3)))
self._assert_equal(Series(list('abc')), Series(list('abc')))
def test_not_equal(self):
self._assert_not_equal(Series(range(3)), Series(range(3)) + 1)
self._assert_not_equal(Series(list('abc')), Series(list('xyz')))
self._assert_not_equal(Series(range(3)), Series(range(4)))
self._assert_not_equal(
Series(range(3)), Series(
range(3), dtype='float64'))
self._assert_not_equal(
Series(range(3)), Series(
range(3), index=[1, 2, 4]))
# ATM meta data is not checked in assert_series_equal
# self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True)
def test_less_precise(self):
s1 = Series([0.12345], dtype='float64')
s2 = Series([0.12346], dtype='float64')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
for i in range(4):
self._assert_equal(s1, s2, check_less_precise=i)
pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)
s1 = Series([0.12345], dtype='float32')
s2 = Series([0.12346], dtype='float32')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
for i in range(4):
self._assert_equal(s1, s2, check_less_precise=i)
pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)
# even less than less precise
s1 = Series([0.1235], dtype='float32')
s2 = Series([0.1236], dtype='float32')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
pytest.raises(AssertionError, assert_series_equal, s1, s2, True)
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_series_equal_message(self):
expected = """Series are different
Series length are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))
expected = """Series are different
Series values are different \\(33\\.33333 %\\)
\\[left\\]: \\[1, 2, 3\\]
\\[right\\]: \\[1, 2, 4\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),
check_less_precise=True)
class TestAssertFrameEqual(object):
def _assert_equal(self, x, y, **kwargs):
assert_frame_equal(x, y, **kwargs)
assert_frame_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_frame_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_frame_equal, b, a, **kwargs)
def test_equal_with_different_row_order(self):
# check_like=True ignores row-column orderings
df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c'])
df2 = pd.DataFrame({'A': [3, 2, 1], 'B': [6, 5, 4]},
index=['c', 'b', 'a'])
self._assert_equal(df1, df2, check_like=True)
self._assert_not_equal(df1, df2)
def test_not_equal_with_different_shape(self):
self._assert_not_equal(pd.DataFrame({'A': [1, 2, 3]}),
pd.DataFrame({'A': [1, 2, 3, 4]}))
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_empty_dtypes(self):
df1 = pd.DataFrame(columns=["col1", "col2"])
df1["col1"] = df1["col1"].astype('int64')
df2 = pd.DataFrame(columns=["col1", "col2"])
self._assert_equal(df1, df2, check_dtype=False)
self._assert_not_equal(df1, df2, check_dtype=True)
def test_frame_equal_message(self):
expected = """DataFrame are different
DataFrame shape mismatch
\\[left\\]: \\(3, 2\\)
\\[right\\]: \\(3, 1\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3]}))
expected = """DataFrame\\.index are different
DataFrame\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b', u?'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'a', u?'b', u?'d'\\], dtype='object'\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'd']))
expected = """DataFrame\\.columns are different
DataFrame\\.columns values are different \\(50\\.0 %\\)
\\[left\\]: Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'A', u?'b'\\], dtype='object'\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'b': [4, 5, 6]},
index=['a', 'b', 'c']))
expected = """DataFrame\\.iloc\\[:, 1\\] are different
DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\)
\\[left\\]: \\[4, 5, 6\\]
\\[right\\]: \\[4, 5, 7\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}))
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}),
by_blocks=True)
class TestAssertCategoricalEqual(object):
def test_categorical_equal_message(self):
expected = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
a = pd.Categorical([1, 2, 3, 4])
b = pd.Categorical([1, 2, 3, 5])
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
expected = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]: \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
a = pd.Categorical([1, 2, 4, 3], categories=[1, 2, 3, 4])
b = pd.Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
expected = """Categorical are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True"""
a = pd.Categorical([1, 2, 3, 4], ordered=False)
b = pd.Categorical([1, 2, 3, 4], ordered=True)
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
class TestRNGContext(object):
def test_RNGContext(self):
expected0 = 1.764052345967664
expected1 = 1.6243453636632417
with RNGContext(0):
with RNGContext(1):
assert np.random.randn() == expected1
assert np.random.randn() == expected0
class TestLocale(object):
def test_locale(self):
if sys.platform == 'win32':
pytest.skip(
"skipping on win platforms as locale not available")
# GH9744
locales = tm.get_locales()
assert len(locales) >= 1
| bsd-3-clause |
sargas/scipy | scipy/signal/spectral.py | 3 | 13369 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/datasets/_olivetti_faces.py | 3 | 5131 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
https://cs.nyu.edu/~roweis/
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from os.path import dirname, exists, join
from os import makedirs, remove
import numpy as np
from scipy.io.matlab import loadmat
import joblib
from . import get_data_home
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ._base import _pkl_filepath
from ..utils import check_random_state, Bunch
# The original data can be found at:
# https://cs.nyu.edu/~roweis/data/olivettifaces.mat
FACES = RemoteFileMetadata(
filename='olivettifaces.mat',
url='https://ndownloader.figshare.com/files/5976027',
checksum=('b612fb967f2dc77c9c62d3e1266e0c73'
'd5fca46a4b8906c18e454d41af987794'))
def fetch_olivetti_faces(*, data_home=None, shuffle=False, random_state=0,
download_if_missing=True, return_X_y=False):
"""Load the Olivetti faces data-set from AT&T (classification).
Download it if necessary.
================= =====================
Classes 40
Samples total 400
Dimensionality 4096
Features real, between 0 and 1
================= =====================
Read more in the :ref:`User Guide <olivetti_faces_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : bool, default=False
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
random_state : int, RandomState instance or None, default=0
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data, target)` instead of a `Bunch` object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.22
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (400, 4096)
Each row corresponds to a ravelled
face image of original size 64 x 64 pixels.
images : ndarray, shape (400, 64, 64)
Each row is a face image
corresponding to one of the 40 subjects of the dataset.
target : ndarray, shape (400,)
Labels associated to each face image.
Those labels are ranging from 0-39 and correspond to the
Subject IDs.
DESCR : str
Description of the modified Olivetti Faces Dataset.
(data, target) : tuple if `return_X_y=True`
.. versionadded:: 0.22
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, 'olivetti.pkz')
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
print('downloading Olivetti faces from %s to %s'
% (FACES.url, data_home))
mat_path = _fetch_remote(FACES, dirname=data_home)
mfile = loadmat(file_name=mat_path)
# delete raw .mat data
remove(mat_path)
faces = mfile['faces'].T.copy()
joblib.dump(faces, filepath, compress=6)
del mfile
else:
faces = joblib.load(filepath)
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
faces_vectorized = faces.reshape(len(faces), -1)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'olivetti_faces.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return faces_vectorized, target
return Bunch(data=faces_vectorized,
images=faces,
target=target,
DESCR=fdescr)
| bsd-3-clause |
matthewzimmer/traffic-sign-classification | zimpy/networks/keras/conv2d_maxpooling_dropout_nn.py | 2 | 2314 | # required in order to execute this file from project root.
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
# TODO: Re-construct the network and add dropout after the pooling layer.
# TODO: Compile and train the model.
from keras.layers import Dense, Activation, Flatten, Convolution2D, MaxPooling2D, Dropout
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from zimpy.datasets.german_traffic_signs import GermanTrafficSignDataset
data = GermanTrafficSignDataset()
data.configure(one_hot=True, train_validate_split_percentage=0)
X_train = data.train_orig
y_train = data.train_labels
X_train = data.normalize_data(X_train)
X_train = X_train.astype('float32')
# convert class vectors to binary class matrices
# Y_train = np_utils.to_categorical(y_train, nb_classes)
# TODO: Re-construct the network and add a convolutional layer before the first fully-connected layer.
model = Sequential(name='input')
model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, name='hidden1'))
model.add(Activation('relu'))
model.add(Dense(data.num_classes))
model.add(Activation('softmax', name='output'))
model.summary()
# TODO: Compile and train the model.
batch_size = 128
nb_classes = data.num_classes
nb_epoch = 2
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# TODO: Split some of the training data into a validation dataset.
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=0.15,
random_state=832224)
# TODO: Compile and train the model to measure validation accuracy.
history = model.fit(X_train, y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_val, y_val))
# score = model.evaluate(X_val, y_val, verbose=1)
# print('Validation (loss, accuracy): (%.3f, %.3f)' % (score[0], score[1]))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(history.history['val_acc'][0] > 0.9), "The validation accuracy is: %.3f" % history.history['val_acc'][0] | mit |
zimmermegan/MARDA | nltk-3.0.3/nltk/probability.py | 8 | 83570 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (additions)
# Trevor Cohn <[email protected]> (additions)
# Peter Ljunglöf <[email protected]> (additions)
# Liang Dong <[email protected]> (additions)
# Geoffrey Sampson <[email protected]> (additions)
# Ilia Kurenkov <[email protected]> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist[word.lower()] += 1
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
Counter.__init__(self, samples)
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return sum(self.values())
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
return self.r_Nr(bins)[r]
def r_Nr(self, bins=None):
"""
Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: dict
"""
_r_Nr = defaultdict(int)
for count in self.values():
_r_Nr[count] += 1
# Special case for Nr[0]:
_r_Nr[0] = bins - self.B() if bins is not None else 0
return _r_Nr
def _cumulative_frequencies(self, samples):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: list(float)
"""
cf = 0.0
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self.N() == 0:
return 0
return float(self[sample]) / self.N()
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any
"""
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type cumulative: bool
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution,
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
tabulated. For cumulative counts, specify cumulative=True.
:param samples: The samples to tabulate (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print("%4s" % samples[i], end=' ')
print()
for i in range(len(samples)):
print("%4d" % freqs[i], end=' ')
print()
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def __le__(self, other):
if not isinstance(other, FreqDist):
raise_unorderable_types("<=", self, other)
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ge__ = lambda self, other: not self <= other or self == other
__lt__ = lambda self, other: self <= other and not self == other
__gt__ = lambda self, other: not self <= other
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return self.pformat()
def pprint(self, maxlen=10, stream=None):
"""
Print a string representation of this FreqDist to 'stream'
:param maxlen: The maximum number of items to print
:type maxlen: int
:param stream: The stream to print to. stdout by default
"""
print(self.pformat(maxlen=maxlen), file=stream)
def pformat(self, maxlen=10):
"""
Return a string representation of this FreqDist.
:param maxlen: The maximum number of items to display
:type maxlen: int
:rtype: string
"""
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
return (math.log(p, 2) if p != 0 else _NINF)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
p_init = p
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, p_init-p))
return random.choice(list(self.samples()))
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
return (self._prob if sample in self._sampleset else 0)
def max(self):
return self._samples[0]
def samples(self):
return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
"""
Generates a random probability distribution whereby each sample
will be between 0 and 1 with equal probability (a uniform random
distribution, also called a continuous uniform distribution).
"""
def __init__(self, samples):
if len(samples) == 0:
raise ValueError('A probability distribution must '+
'have at least one sample.')
self._probs = self.unirand(samples)
self._samples = list(self._probs.keys())
@classmethod
def unirand(cls, samples):
"""
The key function that creates a randomized initial distribution
that still sums to 1. Set as a dictionary of prob values so that
it can still be passed to MutableProbDist and called with identical
syntax to UniformProbDist
"""
randrow = [random.random() for i in range(len(samples))]
total = sum(randrow)
for i, x in enumerate(randrow):
randrow[i] = x/total
total = sum(randrow)
if total != 1:
# this difference, if present, is so small that it can be subtracted
# from any element without pushing a probability outside (0, 1)
randrow[-1] -= total - 1
return dict((s, randrow[i]) for i, s in enumerate(samples))
def prob(self, sample):
return self._probs.get(sample, 0)
def samples(self):
return self._samples
def __repr__(self):
return '<RandomUniformProbDist with %d samples>' %len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
self._log = log
# Normalize the distribution, if requested.
if normalize:
if len(prob_dict) == 0:
raise ValueError('A DictionaryProbDist must have at least one sample ' +
'before it can be normalized.')
if log:
value_sum = sum_logs(list(self._prob_dict.values()))
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None:
bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
r_Nr = base_fdist.r_Nr(bins)
Nr = [r_Nr[r] for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
In the special case that *N[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distribution.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([list(fd) for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of distinct event types observed so far. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of distinct event types observed so far. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalizing factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
return (c / float(self._N + self._T) if c != 0 else self._P0)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# the WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think the count of unseen as the count
# of frequency one (see Jurafsky & Martin 2nd Edition, p101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is 1.96 greater than the standard deviation, and switch to r* if
# it is less or equal:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient correspond to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
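# A small illustration of the adjusted count (hypothetical Nr values, not
# taken from any corpus): if N(1) = 120 types were seen once and N(2) = 40
# types were seen twice, a type observed once gets the adjusted count
# c* = (1 + 1) * N(2) / N(1) = 2 * 40 / 120 ~= 0.67; the mass freed by
# discounting the singletons is what covers the unseen types.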
##//////////////////////////////////////////////////////
## Simple Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuring ProbDist fits the relationship between frequency and
frequency of frequency with a straight line in log-log space by linear regression.
Details of Simple Good-Turing algorithm can be found in:
- Good Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pairs (xi, yi), where xi denotes the frequency and
yi denotes the frequency of frequency, we want to minimize their
square variation. E(x) and E(y) represent the mean of xi and yi.
- slope: b = sigma((xi-E(x))(yi-E(y))) / sigma((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
SUM_TO_ONE = False
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist.B() + 1``
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr_non_zero(self):
r_Nr = self._freqdist.r_Nr()
del r_Nr[0]
return r_Nr
def _r_Nr(self):
"""
Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
nonzero = self._r_Nr_non_zero()
if not nonzero:
return [], []
return zip(*sorted(nonzero.items()))
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
# For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
i = (r[j-1] if j > 0 else 0)
k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
if self._slope >= -1:
warnings.warn('SimpleGoodTuring did not find a proper best fit '
'line for smoothing probabilities of occurrences. '
'The probability estimates are likely to be '
'unreliable.')
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the number of samples with count r.
:param r: The amount of frequency.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0:
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
prob_sum = 0.0
# The class stores no _Nr attribute; use the freqdist's r -> Nr mapping.
for r, nr in self._freqdist.r_Nr().items():
prob_sum += nr * self._prob_measure(r) / self._renormal
print("Probability Sum:", prob_sum)
#assert prob_sum != 1.0, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = array.array(str("d"), [0.0]) * len(samples)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return 0.0
return (2**(self._data[i]) if self._logs else self._data[i])
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return float('-inf')
return (self._data[i] if self._logs else math.log(self._data[i], 2))
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being the valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
self._data[i] = (prob if log else math.log(prob, 2))
else:
self._data[i] = (2**(prob) if log else prob)
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
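# A minimal usage sketch of the class defined below (the trigram counts are
# invented for illustration; with the default discount of 0.75 the numbers
# work out as shown):
#
#     >>> trigrams = FreqDist({('the', 'cat', 'sat'): 3,
#     ...                      ('the', 'cat', 'ran'): 1,
#     ...                      ('a', 'cat', 'sat'): 1})
#     >>> kn = KneserNeyProbDist(trigrams)
#     >>> kn.prob(('the', 'cat', 'sat'))   # seen trigram: (3 - 0.75) / 4
#     0.5625
#     >>> kn.prob(('xyz', 'cat', 'sat'))   # unseen context: no estimate
#     0.0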
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
"""
Kneser-Ney estimate of a probability distribution. This is a version of
    back-off that counts how likely an n-gram is given that the n-1-gram had
    been seen in training. Extends the ProbDistI interface, requires a trigram
    FreqDist instance to train on. Optionally, a discount value different from
    the default can be specified. The default discount is set to 0.75.
"""
def __init__(self, freqdist, bins=None, discount=0.75):
"""
:param freqdist: The trigram frequency distribution upon which to base
the estimation
:type freqdist: FreqDist
:param bins: Included for compatibility with nltk.tag.hmm
:type bins: int or float
:param discount: The discount applied when retrieving counts of
trigrams
:type discount: float (preferred, but can be set to int)
"""
if not bins:
self._bins = freqdist.B()
else:
self._bins = bins
self._D = discount
# cache for probability calculation
self._cache = {}
# internal bigram and trigram frequency distributions
self._bigrams = defaultdict(int)
self._trigrams = freqdist
# helper dictionaries used to calculate probabilities
self._wordtypes_after = defaultdict(float)
self._trigrams_contain = defaultdict(float)
self._wordtypes_before = defaultdict(float)
for w0, w1, w2 in freqdist:
self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
self._wordtypes_after[(w0,w1)] += 1
self._trigrams_contain[w1] += 1
self._wordtypes_before[(w1,w2)] += 1
def prob(self, trigram):
# sample must be a triple
if len(trigram) != 3:
raise ValueError('Expected an iterable with 3 members.')
trigram = tuple(trigram)
w0, w1, w2 = trigram
if trigram in self._cache:
return self._cache[trigram]
else:
# if the sample trigram was seen during training
if trigram in self._trigrams:
prob = (self._trigrams[trigram]
- self.discount())/self._bigrams[(w0, w1)]
# else if the 'rougher' environment was seen during training
elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
aftr = self._wordtypes_after[(w0, w1)]
bfr = self._wordtypes_before[(w1, w2)]
# the probability left over from alphas
leftover_prob = ((aftr * self.discount())
/ self._bigrams[(w0, w1)])
# the beta (including normalization)
beta = bfr /(self._trigrams_contain[w1] - aftr)
prob = leftover_prob * beta
# else the sample was completely unseen during training
else:
prob = 0.0
self._cache[trigram] = prob
return prob
def discount(self):
"""
Return the value by which counts are discounted. By default set to 0.75.
:rtype: float
"""
return self._D
def set_discount(self, discount):
"""
Set the value by which counts are discounted to the value of discount.
:param discount: the new value to discount counts by
:type discount: float (preferred, but int possible)
:rtype: None
"""
self._D = discount
def samples(self):
return self._trigrams.keys()
def max(self):
return self._trigrams.max()
def __repr__(self):
'''
Return a string representation of this ProbDist
:rtype: str
'''
        return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = (pdist.prob(s) for s in pdist.samples())
return -sum(p * math.log(p,2) for p in probs)
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition][word] += 1
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
FreqDist({'the': 3, 'dog': 2, 'not': 1})
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond][sample] += 1
def __reduce__(self):
kv_pairs = ((cond, self[cond]) for cond in self.conditions())
return (self.__class__, (), None, None, kv_pairs)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return list(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in compat.itervalues(self))
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len("%s" % c) for c in conditions)
print(' ' * condition_size, end=' ')
for s in samples:
print("%4s" % s, end=' ')
print()
for c in conditions:
print("%*s" % (condition_size, c), end=' ')
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print("%4d" % f, end=' ')
print()
# @total_ordering doesn't work here, since the class inherits from a builtin class
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<=", self, other)
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<", self, other)
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">=", self, other)
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">", self, other)
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return list(self.keys())
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modeling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.corpus import brown
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> cpdist['passed'].max()
'VBD'
>>> cpdist['passed'].prob('VBD')
0.423...
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
self._probdist_factory = probdist_factory
self._factory_args = factory_args
self._factory_kw_args = factory_kw_args
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
def __missing__(self, key):
self[key] = self._probdist_factory(FreqDist(),
*self._factory_args,
**self._factory_kw_args)
return self[key]
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
self.update(probdist_dict)
def __missing__(self, key):
self[key] = DictionaryProbDist()
return self[key]
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
def sum_logs(logs):
return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1 + numsamples) // 2) +
random.randint(0, numsamples // 2))
fdist[y] += 1
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1 + numsamples) // 2 + 1):
for y in range(0, numsamples // 2 + 1):
fdist[x+y] += 1
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions and uses them to sample a random process with
``numsamples`` samples. Each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
    then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distributions.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
sgt = SimpleGoodTuringProbDist(fd)
print('%18s %8s %14s' \
% ("word", "freqency", "SimpleGoodTuring"))
fd_keys_sorted=(key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
for key in fd_keys_sorted:
print('%18s %8d %14e' \
% (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| mit |
JsNoNo/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
mxjl620/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
scosu/cbenchsuite | plot_utils.py | 1 | 22008 | #!/usr/bin/python3
# Cbenchsuite - A C benchmarking suite for Linux benchmarking.
# Copyright (C) 2013 Markus Pargmann <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as pyplt
import statistics
import operator
import re
import datetime
import json
import traceback
def property_get(properties, key):
if properties and key in properties:
return properties[key]
return None
def mean_confidence_interval(data, confidence=0.95):
mean, _, _, _, _, h = statistics.stats(data, 1-confidence)
return mean, h
class parameter_sort:
def _part_parameter(self, par):
slash = par.split('/')
equal = [i.split('=') for i in slash]
ret = []
for i in equal:
ret += [j.split(' ') for j in i]
return ret
def __init__(self, obj, *args):
if isinstance(obj, tuple):
obj = obj[0]
self.cmp_list = self._part_parameter(obj)
def _rec_cmp(self, a, b):
if isinstance(a, int):
if a < b:
return -1
if a > b:
return 1
return 0
if isinstance(a, str):
mstr = "([^0-9]*)([0-9]+(.[0-9]+|))(.*)"
am = re.match(mstr, a)
bm = re.match(mstr, b)
if not am or not bm:
if a < b:
return -1
if a == b:
return 0
return 1
if am.group(1) < bm.group(1):
return -1
if am.group(1) > bm.group(1):
return 1
ai = float(am.group(2))
bi = float(bm.group(2))
if ai < bi:
return -1
if ai > bi:
return 1
if am.group(4) < bm.group(4):
return -1
if am.group(4) > bm.group(4):
return 1
return 0
for i in range(len(a)):
if i == len(b):
return -1
ret = self._rec_cmp(a[i], b[i])
if ret != 0:
return ret
if len(a) < len(b):
return -1
return 0
def _compare(self, opt):
a = self.cmp_list
b = opt.cmp_list
return self._rec_cmp(a, b)
def __lt__(self, obj):
return self._compare(obj) < 0
def __gt__(self, obj):
return self._compare(obj) > 0
def __le__(self, obj):
return self._compare(obj) <= 0
def __ge__(self, obj):
return self._compare(obj) >= 0
def __eq__(self, obj):
return self._compare(obj) == 0
def __ne__(self, obj):
return self._compare(obj) != 0
def parameter_sort_create(obj):
return parameter_sort(obj)
def _sort_keys(keys):
return sorted(keys, key=parameter_sort_create)
def _create_figure(properties):
xsize = property_get(properties, 'xsize')
ysize = property_get(properties, 'ysize')
dpi = property_get(properties, 'dpi')
fontsize = property_get(properties, 'fontsize')
return pyplt.figure(figsize=(xsize, ysize), dpi=dpi)
def _plot_stuff(fig, ax, properties, path, legend_handles=None, legend_labels=None, nr_runs=None):
fs = property_get(properties, 'watermarkfontsize')
additional_info = ''
if nr_runs:
additional_info += 'Repetitions: ' + str(min(nr_runs)) + " - " + str(max(nr_runs))
additional_info += "\n" + datetime.date.today().isoformat()
wm = property_get(properties, 'watermark')
if wm != '':
additional_info += "\n" + wm
fig.text(1, 0, additional_info, fontsize=fs, color='black', ha='right', va='bottom', alpha=0.7)
fs = property_get(properties, 'xtickfontsize')
ax.tick_params(axis='x', which='major', labelsize=fs)
ax.tick_params(axis='x', which='minor', labelsize=fs)
fs = property_get(properties, 'ytickfontsize')
ax.tick_params(axis='y', which='major', labelsize=fs)
ax.tick_params(axis='y', which='minor', labelsize=fs)
title = property_get(properties, 'title')
if title != None:
fs = property_get(properties, 'titlefontsize')
ax.set_title(title, fontsize=fs)
x_label = property_get(properties, 'xlabel')
if x_label != None:
fs = property_get(properties, 'xlabelfontsize')
ax.set_xlabel(x_label, fontsize=fs)
y_label = property_get(properties, 'ylabel')
if y_label != None:
fs = property_get(properties, 'ylabelfontsize')
ax.set_ylabel(y_label, fontsize=fs)
ax.grid(b=properties['grid'], which=properties['grid-ticks'], axis=properties['grid-axis'])
no_legend = property_get(properties, 'no-legend')
if not no_legend:
fs = property_get(properties, 'legendfontsize')
if legend_handles and legend_labels:
handles = legend_handles
labels = legend_labels
else:
handles, labels = ax.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
labels = []
for k,v in by_label.items():
labels.append((k,v))
hl = sorted(labels, key=parameter_sort_create)
labels, handles = zip(*hl)
max_label = 0
for k in labels:
max_label = max(len(k), max_label)
est_len = (max_label * fs) / 30
xlims = ax.get_xlim()
x_range = xlims[1] - xlims[0]
ax.set_xlim(xmax = xlims[0] + x_range * max(1.01, 0.00155 * est_len ** 2 - 0.0135 * est_len + 1.09))
ax.legend(handles, labels, loc='center right', bbox_to_anchor=(1.13, 0.5), fancybox=True, shadow=True, fontsize=fs)
path += '.' + property_get(properties, 'file-type')
for i in range(10):
try:
fig.savefig(path)
break
except Exception as err:
if i == 9:
raise(err)
print("Failed to plot " + path + " for the " + str(i) + " time. This may be due to some parallel rendering bugs in matplotlib, trying again.")
fig.delaxes(ax)
pyplt.close(fig)
print("Generated figure " + path)
# Takes something like this:
#data_line = {
# 'x': [(0,1), (1,3), (2,2), (3,5)],
# 'y': [(0,1), (2,1), (3,2), (4,5)],
# 'z': [(0,[1,2,3,2,2,2,2]), (1,[2,3,2,2,3,3,3,3]), (2,2), (3,5)]
#}
def plot_line_chart(data, path, properties=None, fmts_arg = None, x_keys = None):
fig = _create_figure(properties)
ax = fig.add_subplot(111)
if fmts_arg == None:
fmts = [
{'color': '#348ABD', 'linestyle': '-', 'marker': ' '},
{'color': '#A60628', 'linestyle': '-', 'marker': ' '},
{'color': '#467821', 'linestyle': '-', 'marker': ' '},
{'color': '#CF4457', 'linestyle': '-', 'marker': ' '},
{'color': '#188487', 'linestyle': '-', 'marker': ' '},
{'color': '#E24A33', 'linestyle': '-', 'marker': ' '},
{'color': '#348ABD', 'linestyle': '--', 'marker': ' '},
{'color': '#A60628', 'linestyle': '--', 'marker': ' '},
{'color': '#467821', 'linestyle': '--', 'marker': ' '},
{'color': '#CF4457', 'linestyle': '--', 'marker': ' '},
{'color': '#188487', 'linestyle': '--', 'marker': ' '},
{'color': '#E24A33', 'linestyle': '--', 'marker': ' '},
{'color': '0.4', 'linestyle': '-', 'marker': ' '},
{'color': '0.4', 'linestyle': '--', 'marker': ' '},
]
else:
fmts = fmts_arg
fmtsid = 0
def _next_fmt():
nonlocal fmtsid
fmt = fmts[fmtsid%len(fmts)]
fmtsid += 1
return fmt
if x_keys == None:
x_keys = _sort_keys(data.keys())
start_offsets = [0]
ct = 0
while True:
offset = start_offsets[-1]
for k in x_keys:
if k not in data:
continue
v = data[k]
if not (isinstance(v[0][0], list) or isinstance(v[0][0], tuple)):
continue
if len(v) <= ct:
continue
offset = max(offset, start_offsets[-1] + v[ct][-1][0])
if offset == start_offsets[-1]:
break
start_offsets.append(offset)
ct += 1
if properties['line-nr-ticks'] < offset:
break
max_overall_x = None
min_overall_x = None
max_overall_y = None
min_overall_y = None
for k in x_keys:
if k not in data:
continue
fmt = _next_fmt()
if isinstance(data[k][0][0], list) or isinstance(data[k][0][0], tuple):
dats = data[k]
else:
dats = [data[k]]
for xind in range(len(dats)):
xs = []
ys = []
yerrs = []
yerr_not_zero = False
v = dats[xind]
if len(start_offsets) <= xind:
break
for pt in v:
xval = pt[0] + start_offsets[xind]
if xval > properties['line-nr-ticks']:
break
xs.append(xval)
if max_overall_x == None:
max_overall_x = xval
min_overall_x = xval
else:
max_overall_x = max(max_overall_x, xval)
min_overall_x = min(min_overall_x, xval)
if isinstance(pt[1], list):
m, h = mean_confidence_interval(pt[1])
yerrs.append(h)
ys.append(m)
yerr_not_zero = True
if max_overall_y == None:
max_overall_y = m
min_overall_y = m
else:
max_overall_y = max(max_overall_y, m)
min_overall_y = min(min_overall_y, m)
else:
ys.append(pt[1])
yerrs.append(0.0)
if max_overall_y == None:
max_overall_y = pt[1]
min_overall_y = pt[1]
else:
max_overall_y = max(max_overall_y, pt[1])
min_overall_y = min(min_overall_y, pt[1])
if yerr_not_zero:
ax.errorbar(xs, ys, yerrs, label=k, linestyle=fmt['linestyle'], marker=fmt['marker'], color=fmt['color'])
else:
ax.plot(xs, ys, label=k, linestyle=fmt['linestyle'], marker=fmt['marker'], color=fmt['color'])
ylims = ax.get_ylim()
ax.vlines(start_offsets[1:-1], ylims[0], ylims[1], linestyles='dotted')
ax.set_ylim(ylims)
try:
_plot_stuff(fig, ax, properties, path)
except Exception as err:
print(traceback.print_tb(err.__traceback__))
json.dump((data, properties), open(path + '.err.json', 'w'), indent=2)
print("Error plotting, wrote data to " + path + '.err.json')
# Works with all those structures:
#data = {'a':1, 'b':4, 'c':2}
#data = {
# 'a': {'1': 2, '2': 1, 'dummy': 4},
# 'b': {'1': 3, '2': 1, 'dummy': 3},
# 'c': {'1': 4, '2': 2, 'dummy': 1}
#}
#data = {
# 'x': {'1': {'a': 4, 'e': [1, 2, 3], 'c': 3}, '2': {'d':5, 'e': 2}, 'dummy': {'e':3}},
# 'y': {'1': {'a': 6, 'e': [1, 2, 2, 3], 'c': 1}, '2ab': {'d':5}, 'dummy': {'e':3}},
# 'z': {'1': {'a': 2, 'eaasd': [1, 2, 2, 2, 3], 'c': 4}, '2': {'d':5}, 'dummy': {'e':3}}
#}
##
# @brief Plot a bar chart with data of up to 3 dimensions
#
# @param data dictionary nested at most 3 levels deep, with floats or lists of
# floats as the lowest children. Lists will produce errorbars. The keys of the
# dictionary are used to sort the entries.
# @param l1_keys Level 1 keys, this specifies the order
# @param l2_keys Level 2 keys, this specifies the order
# @param l3_keys Level 3 keys, this specifies the order
# @param colors_arg
# (the confidence level is taken from properties['confidence'], not a separate argument)
#
# @return
def plot_bar_chart(data, path, properties = None, l1_keys = None, l2_keys = None, l3_keys = None, colors_arg = None):
fig = _create_figure(properties)
ax = fig.add_subplot(111)
if l1_keys == None:
l1_keys = _sort_keys(data.keys())
if l2_keys == None:
l2_keys = set()
for k1, v1 in data.items():
if not isinstance(v1, dict):
continue
for k2, v2 in v1.items():
l2_keys.add(k2)
l2_keys = _sort_keys(list(l2_keys))
if l3_keys == None:
l3_keys = set()
for k1, v1 in data.items():
if not isinstance(v1, dict):
continue
for k2, v2 in v1.items():
if not isinstance(v2, dict):
continue
for k3, v3 in v2.items():
l3_keys.add(k3)
l3_keys = _sort_keys(list(l3_keys))
xxticks = []
xticks = []
nr_runs = []
confidence = property_get(properties, 'confidence')
label_colors = {}
if colors_arg == None:
colors = ['#348ABD', '#A60628', '#467821', '#CF4457', '#188487', '#E24A33', '0.3', '0.5', '0.7']
else:
colors = colors_arg
colorid = 0
x = 1
max_overall = 0
min_val = None
def _xtick(label, x_val = None):
nonlocal x
if x_val == None:
x_val = x
xxticks.append(x_val)
xticks.append(label)
def _color_get(label):
nonlocal colorid
if label in label_colors:
return False, label_colors[label]
else:
label_colors[label] = colors[colorid % len(colors)]
colorid += 1
return True, label_colors[label]
def _plt_bar(x, y, label):
nonlocal confidence
nonlocal min_val
set_label, color = _color_get(label)
if isinstance(y, list):
nonlocal max_overall
m,h = mean_confidence_interval(y, confidence)
max_overall = max(m, max_overall)
if not min_val: min_val = m
min_val = min(min_val, m)
if label == None or not set_label:
ax.bar(x, m, align="center", color = color, ecolor='r', yerr = h)
else:
ax.bar(x, m, align="center", color = color, ecolor='r', label = label, yerr = h)
nr_runs.append(len(y))
else:
max_overall = max(max_overall, y)
if not min_val: min_val = y
min_val = min(min_val, y)
if label == None or not set_label:
ax.bar(x, y, align="center", color = color)
else:
ax.bar(x, y, align="center", color = color, label = label)
nr_runs.append(1)
levels = 0
for l1_key in l1_keys:
l1_val = data[l1_key]
levels = 1
if not isinstance(l1_val, dict):
_xtick(l1_key)
_plt_bar(x, l1_val, l1_key)
x += 1
else:
_xtick(l1_key, x + len(l1_val.keys()) / 2 - 0.5)
levels = 2
for l2_key in l2_keys:
if l2_key not in l1_val:
continue
l2_val = l1_val[l2_key]
levels = 3
if not isinstance(l2_val, dict):
_plt_bar(x, l2_val, l2_key)
max_val = 0
x += 1
else:
max_val = 0
values = []
for l3_key in l3_keys:
if l3_key not in l2_val:
continue
l3_val = l2_val[l3_key]
if isinstance(l3_val, list):
max_val = max(sum(l3_val)/len(l3_val), max_val)
else:
max_val = max(l3_val, max_val)
values.append((x, l3_val, l3_key))
for l3_vals in sorted(values, key=operator.itemgetter(1), reverse=True):
_plt_bar(l3_vals[0], l3_vals[1], l3_vals[2])
fs = property_get(properties, 'barfontsize')
rot = min((len(l2_key) - 1)*30, 90)
ax.text(x, max_val + max_overall * 0.02, l2_key, horizontalalignment='center', verticalalignment='bottom', rotation=rot, fontsize = fs)
x += 1
x += 1
ax.relim()
lims = ax.get_ylim()
new_min = max(0, min_val - (lims[1] - min_val) / 4)
if levels == 3:
ax.set_ylim((new_min, lims[1] * 1.2))
else:
ax.set_ylim((new_min, lims[1]))
ax.set_xticks(xxticks)
ax.set_xticklabels(xticks, rotation=15)
try:
_plot_stuff(fig, ax, properties, path, nr_runs=nr_runs)
except Exception as err:
print(traceback.print_tb(err.__traceback__))
json.dump((data, properties), open(path + '.err.json', 'w'), indent=2)
print("Error plotting, wrote data to " + path + '.err.json')
# Works with all those structures:
#data = {'a':1, 'b':4, 'c':2}
#data = {
# 'a': {'1': 2, '2': 1, 'dummy': 4},
# 'b': {'1': 3, '2': 1, 'dummy': 3},
# 'c': {'1': 4, '2': 2, 'dummy': 1}
#}
##
# @brief Plot a bar chart with data of up to 2 dimensions
#
# @param data dictionary nested at most 2 levels deep, with floats or lists of
# floats as the lowest children. Lists will produce errorbars. The keys of the
# dictionary are used to sort the entries.
# @param l1_keys Level 1 keys, this specifies the order
# @param l2_keys Level 2 keys, this specifies the order
# @param colors_arg
#
# @return
def plot_box_chart(data, path, properties = None, l1_keys = None, l2_keys = None, colors_arg = None):
legend_patches = []
legend_labels = []
nr_runs = []
fig = _create_figure(properties)
ax = fig.add_subplot(111)
if l1_keys == None:
l1_keys = _sort_keys(data.keys())
if l2_keys == None:
l2_keys = set()
for k1, v1 in data.items():
if not isinstance(v1, dict):
continue
for k2, v2 in v1.items():
l2_keys.add(k2)
l2_keys = _sort_keys(list(l2_keys))
xxticks = []
xticks = []
label_colors = {}
if colors_arg == None:
colors = ['#348ABD', '#A60628', '#467821', '#CF4457', '#188487', '#E24A33', '0.3', '0.5', '0.7']
else:
colors = colors_arg
colorid = 0
x = 1
def _xtick(label, x_val = None):
nonlocal x
if x_val == None:
x_val = x
xxticks.append(x_val)
xticks.append(label)
def _color_get(label):
nonlocal colorid
if label in label_colors:
return False, label_colors[label]
else:
label_colors[label] = colors[colorid % len(colors)]
colorid += 1
return True, label_colors[label]
def _plt_bar(x, y, label):
set_label, color = _color_get(label)
if isinstance(y, list):
box = ax.boxplot([y], positions=[x], patch_artist=True, widths=0.35)
nr_runs.append(len(y))
else:
box = ax.boxplot([[y]], positions=[x], patch_artist=True, widths=0.35)
nr_runs.append(1)
patch = box['boxes'][0]
patch.set_facecolor(color)
if label != None and set_label:
legend_patches.append(patch)
legend_labels.append(label)
levels = 0
for l1_key in l1_keys:
l1_val = data[l1_key]
levels = 1
if not isinstance(l1_val, dict):
_xtick(l1_key)
_plt_bar(x, l1_val, l1_key)
x += 1
else:
_xtick(l1_key, x + len(l1_val.keys()) / 2 - 0.5)
levels = 2
for l2_key in l2_keys:
if l2_key not in l1_val:
continue
l2_val = l1_val[l2_key]
levels = 3
if not isinstance(l2_val, dict):
_plt_bar(x, l2_val, l2_key)
x += 1
x += 1
ax.set_xticks(xxticks)
ax.set_xticklabels(xticks, rotation=20)
ax.set_xlim(xmin=0.5)
lims = ax.get_ylim()
ax.set_ylim((lims[0], lims[1] + (lims[1] - lims[0]) * 0.05))
try:
_plot_stuff(fig, ax, properties, path, legend_handles=legend_patches, legend_labels=legend_labels, nr_runs=nr_runs)
except Exception as err:
print(traceback.print_tb(err.__traceback__))
json.dump((data, properties), open(path + '.err.json', 'w'), indent=2)
print("Error plotting, wrote data to " + path + '.err.json')
if __name__ == '__main__':
import sys
props = {
'confidence': 0.95,
'legend': True,
'xsize': 12,
'ysize': 7,
'dpi': 300,
'legendfontsize': 16,
'xlabelfontsize': 17,
'ylabelfontsize': 17,
'ytickfontsize': 15,
'xtickfontsize': 15,
'barfontsize': 15,
'titlefontsize': 20,
'watermark': 'Powered by cbenchsuite (http://cbenchsuite.org)',
'watermarkfontsize': 13,
'file-type': 'svg',
'line-nr-ticks': 300,
'grid-axis': 'y',
'grid-ticks': 'both',
'grid': True,
'plot-depth': 1,
}
data = json.load(open(sys.argv[1], 'r'))
plot_line_chart(data, 'test', properties=props)
| gpl-2.0 |
amirajdhawan/pokerbot | poker_nn/train_models.py | 1 | 6496 | import subprocess
import platform
import sys
import os
import shutil
sys.path.append("/home/amiraj/Dropbox/Courses/CS_6700_Advanced AI/caffe-master/python/caffe/")
sys.path.append("/home/amiraj/Dropbox/Courses/CS_6700_Advanced AI/caffe-master/python/")
import caffe
caffe.set_mode_gpu()
import lmdb
from sklearn.cross_validation import StratifiedShuffleSplit
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab
#Custom Modules
import dataload_lmdb
def main():
#Spit out system information
print "OS: ", platform.platform()
print "Python: ", sys.version.split("\n")[0]
print "CUDA: ", subprocess.Popen(["nvcc","--version"], stdout=subprocess.PIPE).communicate()[0].split("\n")[3]
print "LMDB: ", ".".join([str(i) for i in lmdb.version()])
print ""
no_cards = 0
while True:
try:
            no_cards = int(raw_input("No of cards for system? [3/4/5]:\t"))
if no_cards == 3 or no_cards == 4 or no_cards == 5:
break
else:
raise ValueError
except ValueError:
print "Incorrect input, try again!"
lmdb_train_path = "../dataset/lmdb/cards_" + str(no_cards) +"_train_data_lmdb"
lmdb_test_path = "../dataset/lmdb/cards_" + str(no_cards) +"_test_data_lmdb"
train_data_set = "../dataset/poker-hand-training-true.data"
test_data_set = "../dataset/poker-hand-testing.data"
caffe_path = "../../caffe-master/build/tools/caffe"
config_path = "../nn_config/cards_" + str(no_cards) + "_config/"
config_filename = "config.prototxt"
model_test_filaname = "model_test.prototxt"
caffe_nets = "../caffe_nets/cards_" + str(no_cards) + "_net/"
max_iters_3_cards = 100000
max_iters_4_cards = 100000
max_iters_5_cards = 100000
caffemodel_filename = "_iter_"
solverstate_filaname = "_iter_"
if no_cards == 3:
caffemodel_filename += str(max_iters_3_cards)
solverstate_filaname += str(max_iters_3_cards)
elif no_cards == 4:
caffemodel_filename += str(max_iters_4_cards)
solverstate_filaname += str(max_iters_4_cards)
elif no_cards == 5:
caffemodel_filename += str(max_iters_5_cards)
solverstate_filaname += str(max_iters_5_cards)
caffemodel_filename += ".caffemodel"
solverstate_filaname += ".solverstate"
while True:
load_data = raw_input("Load data into LMDB? Deletes old data if found. [y/n]:\t")
if load_data == "y" or load_data == "Y":
print "Loading data into LMDB..."
if os.path.isdir(lmdb_train_path):
shutil.rmtree(lmdb_train_path)
if os.path.isdir(lmdb_test_path):
shutil.rmtree(lmdb_test_path)
df = pd.read_csv(train_data_set, sep=",")
testing_data = pd.read_csv(test_data_set, sep=",")
training_features = df.ix[:,:(no_cards * 2)].as_matrix()
training_labels = df.ix[:,-1].as_matrix()
testing_features = testing_data.ix[:,:(no_cards * 2)].as_matrix()
testing_labels = testing_data.ix[:,-1].as_matrix()
dataload_lmdb.load_data_into_lmdb(lmdb_train_path, training_features, training_labels)
dataload_lmdb.load_data_into_lmdb(lmdb_test_path, testing_features, testing_labels)
break
elif load_data == "N" or load_data == "n":
break
else:
print "Incorrect input, try again!"
while True:
train_net = raw_input("Train the network? [y/n]:\t")
if train_net == "y" or train_net == "Y":
#dataload_lmdb.get_data_for_case_from_lmdb(lmdb_train_path, "00012345")
print "Training..."
proc = subprocess.Popen(
[caffe_path, "train", "--solver=" + config_path + config_filename],
stderr=subprocess.PIPE)
res = proc.communicate()[1]
if proc.returncode != 0:
print "Error in Caffe training!"
print res
sys.exit()
shutil.move(caffemodel_filename, caffe_nets + caffemodel_filename)
shutil.move(solverstate_filaname, caffe_nets + solverstate_filaname)
break
elif train_net == "n" or train_net == "N":
break
else:
print "Incorrect input, try again!"
while True:
test_net = raw_input("Test the caffe net? [y/n]:\t")
if test_net == "y" or test_net == "Y":
if not os.path.exists(config_path + model_test_filaname):
print "Model_test.prototxt for cards_" + str(no_cards) + "_config not found!"
break
if not os.path.exists(caffe_nets + caffemodel_filename):
print "Caffemodel for cards_" + str(no_cards) + "_net not found, first train the network for cards_"\
+ str(no_cards) + " first!"
break
print "Testing..."
net = caffe.Net(config_path + model_test_filaname, caffe_nets + caffemodel_filename, caffe.TEST)
labels, features = dataload_lmdb.get_data_for_case_from_lmdb(lmdb_test_path, "00001230")
out = net.forward(**{net.inputs[0]: np.asarray([features])})
print np.argmax(out["prob"][0]) == labels, "\n", out
plt.bar(range(10),out["prob"][0])
pylab.show()
break
elif test_net == "n" or test_net == "N":
break
else:
print "Incorrect input, try again!"
print "\n..........End of script.........."
if __name__ == "__main__":
main() | apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/core/reshape.py | 9 | 39134 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse import SparseDataFrame, SparseSeries
from pandas.sparse.array import SparseArray
from pandas._sparse import IntIndex
from pandas.core.categorical import Categorical
from pandas.core.common import notnull, _ensure_platform_int, _maybe_promote
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.common as com
import pandas.algos as algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = com.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
values = [ Categorical.from_array(values[:,i],
categories=self.is_categorical.categories,
ordered=True)
for i in range(values.shape[-1]) ]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width: (i + 1) * width]
mask_chunk = new_mask[:, i * width: (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels,
labels=result_labels,
names=self.new_index_names,
verify_integrity=False)
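# Added illustration (hypothetical helper, not part of the original module).
# It reproduces the example from the _Unstacker docstring above through the
# public Series.unstack entry point; the index labels and values are invented.
def _unstacker_docstring_demo():
    import numpy as np
    import pandas as pd
    index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
                                       ('two', 'a'), ('two', 'b')])
    s = pd.Series(np.arange(1.0, 5.0), index=index)
    # level=-1 moves the innermost index level ('a'/'b') into the columns
    return s.unstack(level=-1)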
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids,
obs_ids, shape, clabels, xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = Series(data.values, index=dummy_index)
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [val if i > val else val - 1 for val in clocs]
return result
dummy = DataFrame(data.values, index=dummy_index,
columns=data.columns)
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index,
self[columns]]))
return indexed.unstack(columns)
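# Added illustration (hypothetical helper, not part of the original module).
# A minimal sketch of the pivot() helper above as it is normally reached via
# DataFrame.pivot; the column names and values are made up for illustration.
def _pivot_demo():
    import pandas as pd
    df = pd.DataFrame({'date': ['d1', 'd1', 'd2', 'd2'],
                       'item': ['a', 'b', 'a', 'b'],
                       'price': [1.0, 2.0, 3.0, 4.0]})
    # one row per date, one column per item, cells filled with prices
    return df.pivot(index='date', columns='item', values='price')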
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
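# Added illustration (hypothetical helper, not part of the original module).
# pivot_simple works directly on three aligned ndarrays; _slow_pivot above
# builds the same shape through a nested dict. The values are arbitrary.
def _pivot_simple_demo():
    import numpy as np
    index = np.array(['one', 'one', 'two', 'two'])
    columns = np.array(['x', 'y', 'x', 'y'])
    values = np.array([1.0, 2.0, 3.0, 4.0])
    # yields a 2x2 frame indexed by 'one'/'two' with columns 'x'/'y'
    return pivot_simple(index, columns, values)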
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
def _unstack_frame(obj, level):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
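    # (added note) Collapse the per-level label arrays into one observation id
    # per row, then compress those ids to consecutive integers; returns the
    # pair (comp_ids, obs_group_ids).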
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
cat = Categorical(index, ordered=True)
return cat.categories, cat.codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = \
zip(*map(factorize, (frame.index, frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels,
labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
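# Added illustration (hypothetical helper, not part of the original module).
# Shows the stack()/unstack() round trip on a tiny frame with invented values:
# stacking moves the columns into a new inner index level, and unstacking the
# result recovers the original layout.
def _stack_round_trip_demo():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.arange(4).reshape(2, 2),
                      index=['one', 'two'], columns=['a', 'b'])
    stacked = df.stack()          # Series with a 2-level MultiIndex
    return stacked.unstack()      # back to the original 2x2 frame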
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level numbers, "
"not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something
we can safely pass to swaplevel:
We generally want to convert the level number into
a level name, except when columns do not have names,
in which case we must leave as a level number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[
lev.take(lab) for lev, lab in
zip(this.columns.levels[:-1], this.columns.labels[:-1])
]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None,
var_name=None, value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i for i in
range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
mdata[target] = com._concat_compat([data[col].values for col in names])
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
    >>> get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
See also ``Series.str.get_dummies``.
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(include=['object',
'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did "
"not match the length of the columns "
"being encoded ({2}).")
if com.is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False, sparse=False):
# Series avoids inconsistent NaN handling
cat = Categorical.from_array(Series(data), ordered=True)
levels = cat.categories
# if all NaN
if not dummy_na and len(levels) == 0:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index)
codes = cat.codes.copy()
if dummy_na:
codes[codes == -1] = len(cat.categories)
levels = np.append(cat.categories, np.nan)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v)
for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [ [] for _ in range(len(dummy_cols)) ]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs)), sparse_index=IntIndex(N, ixs),
fill_value=0)
sparse_series[col] = SparseSeries(data=sarr, index=index)
return SparseDataFrame(sparse_series, index=index, columns=dummy_cols)
else:
dummy_mat = np.eye(number_of_cols).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {
'major': 0,
'minor': 1
}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
cat = Categorical.from_array(mapped_items.take(labels), ordered=True)
labels = cat.codes
items = cat.categories
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
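# Added illustration (hypothetical helper, not part of the original module).
# The two-level index below stands in for the (major, minor) axes of a
# panel-style frame and is purely illustrative.
def _make_axis_dummies_demo():
    import numpy as np
    import pandas as pd
    index = pd.MultiIndex.from_product([['2000', '2001'], ['a', 'b']],
                                       names=['major', 'minor'])
    frame = pd.DataFrame({'x': np.arange(4.0)}, index=index)
    # one 0/1 indicator column per 'minor' label ('a', 'b')
    return make_axis_dummies(frame, axis='minor')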
| gpl-2.0 |
shusenl/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared-exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared-exponential correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
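# (added note) 1.9600 is the ~97.5th percentile of the standard normal, so the
# band y_pred +/- 1.96 * sigma is the pointwise 95% confidence interval
# mentioned in the module docstring, assuming Gaussian predictive errors.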
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
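# (added note) dy is the per-observation noise standard deviation; the nugget
# passed to GaussianProcess below, (dy / y) ** 2, is exactly the normalized
# variance formula quoted in the module docstring.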
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
petrinm/tgstats | generate.py | 1 | 16993 | #!/usr/bin/env python3
import argparse
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import sqlite3, json, datetime
import time, math, os, re
def chat_renames():
"""
Returns list of topics
"""
print("Getting topics...")
renames = []
for sid, timestamp, service in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="service" ORDER BY timestamp;"""):
service = json.loads(service)
if service["action"]["type"] == "chat_rename":
renames.append((
datetime.datetime.fromtimestamp(timestamp),
service["action"]["title"],
service["from"]["print_name"].replace("_", " ")
))
return sorted(renames, key=lambda x: x[0], reverse=True)
def talker_stats(span=None, max_talkers=10):
    """
    Return list of top talkers in descending order
"""
print("Getting top talkers...")
i = 0
talkers = {}
before = int(time.time() - span*24*60*60) if span else 0
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" AND timestamp >= ?;""", (before, )):
message = json.loads(message)
name = message["from"]["print_name"]
if name not in talkers:
talkers[name] = [0, 0, 0, 0]
if "text" in message:
talkers[name][0] += 1
talkers[name][1] += len(re.findall('[a-zäöå]{2,}', message["text"], flags=re.IGNORECASE))
elif "media" in message:
media_type = message["media"]["type"]
if media_type == "photo":
talkers[name][3] += 1
elif media_type == "document":
talkers[name][2] += 1
elif media_type == "geo":
pass
elif media_type == "contact":
pass
return talkers.items()
def bot_spammers(max_talkers=10):
print("Getting top bot spammers...")
cmds = {}
bots = {}
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message";"""):
message = json.loads(message)
name = message["from"]["print_name"]
if "text" in message and message["text"].startswith("/"):
cmd = message["text"].strip().split(" ")[0].split("@")[0]
#print(cmd, "\t", message["text"])
if cmd in cmds:
if name in cmds[cmd]:
cmds[cmd][name] += 1
else:
cmds[cmd][name] = 1
else:
cmds[cmd] = { name: 1 }
elif name.lower()[-3:]== "bot":
# Increase bot's popularity
if name in bots:
bots[name] += 1
else:
bots[name] = 1
# Filter Top-6 commands
cmds = sorted(cmds.items(), key=lambda x: sum(x[1].values()), reverse=True)[:6]
# Filter Top-6 users for each command
cmds = [(c[0], sorted(c[1].items(), key=lambda x: x[1], reverse=True)[:6]) for c in cmds]
# Filter Top-5 Bots
bots = sorted(bots.items(), key=lambda x: x[1], reverse=True)[:5]
return cmds, bots
def most_commonly_used_words():
    """
Return list of most commonly used words
"""
print("Getting most commonly used words...")
words = {}
users = {}
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message";"""):
message = json.loads(message)
if "text" not in message:
continue
for mword in re.findall('[a-zäöå]{2,}', message["text"], flags=re.IGNORECASE):
mword = mword.lower()
if mword not in words:
words[mword] = 1
else:
words[mword] += 1
if mword[0] == "@":
if mword not in users:
users[mword] = 1
else:
users[mword] += 1
#print(sorted(users.items(), key=lambda x: x[1], reverse=True)[:10])
return sorted(words.items(), key=lambda x: x[1], reverse=True)
def population_graph(filepath="aski_population.png", show=False):
print("Creating population graph...")
population = {}
total = 0
prev_date = None
for sid, timestamp, service in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="service" ORDER BY timestamp;"""):
service = json.loads(service)
action_type = service["action"]["type"]
if action_type not in ["chat_add_user", "chat_add_user_link", "chat_del_user"]:
continue
timestamp = datetime.datetime.fromtimestamp(timestamp)
date = datetime.date(timestamp.year, timestamp.month, timestamp.day)
# Init table for the date
if date != prev_date:
population[date] = [total, 0, 0]
prev_date = date
if action_type == "chat_add_user" or action_type == "chat_add_user_link":
total += 1
population[date][0] = total
population[date][1] += 1
elif action_type == "chat_del_user":
total -= 1
population[date][0] = total
population[date][2] -= 1
# TODO: Add today to the list if doesn't exist
#if population[-1] != today:
# population[today] = [total, 0, 0]
dates = []
members = []
income = []
outcome = []
for date, vals in sorted(population.items(), key=lambda x: x[0]):
dates.append(date)
members.append(vals[0])
income.append(vals[1])
outcome.append(vals[2])
fig, ax = plt.subplots()
fig.set_size_inches(14, 6)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))
ax.set_xlim(datetime.date(dates[0].year, dates[0].month, 1), datetime.date(dates[-1].year, dates[-1].month, dates[-1].day))
ax.set_ylim(10 * math.floor(min(outcome) / 10.), 20 * math.ceil((1 + total) / 20.))
ax.plot(dates, members)
ax.bar(dates, income, color="green", edgecolor = "none")
ax.bar(dates, outcome, color="red", edgecolor = "none")
plt.xlabel('Date')
plt.ylabel('Members')
plt.title('Population')
plt.grid(True)
plt.savefig(filepath, dpi=250)
if show:
plt.show()
def hourly_rate(timespan=3600):
"""
Calculate most messages inside the timespan
"""
print("Calculating message rates...")
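    # Sliding-window approach: timestamps arrive in ascending order, each new
    # message is appended to `buff`, entries older than `timespan` seconds are
    # dropped, and the largest buffer seen so far gives the peak rate.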
buff = []
top_date, top_rate = (0, 0), 0
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY timestamp;"""):
message = json.loads(message)
# Append new message to the buffer
if "text" in message:
buff.append(timestamp)
# Filter old messages
buff = [x for x in buff if x + timespan > timestamp]
if len(buff) > top_rate:
top_rate = len(buff)
top_date = (buff[0], buff[-1])
#print(top_date, top_rate, message["text"])
return top_rate, datetime.datetime.fromtimestamp(top_date[0]), \
datetime.datetime.fromtimestamp(top_date[1])
def messages_graph(filepath="messages.png", show=True):
print("Creating messages graphs...")
messages = {}
prev_date = None
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY timestamp;"""):
message = json.loads(message)
timestamp = datetime.datetime.fromtimestamp(timestamp)
date = datetime.date(timestamp.year, timestamp.month, timestamp.day)
# Init table for the date
if date != prev_date:
messages[date] = 0
prev_date = date
messages[date] += 1
dates = []
mgs = []
for date, vals in sorted(messages.items(), key=lambda x: x[0]):
dates.append(date)
mgs.append(vals)
fig, ax = plt.subplots()
fig.set_size_inches(14, 6)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))
ax.set_xlim(datetime.date(dates[0].year, dates[0].month, 1), datetime.date(dates[-1].year, dates[-1].month, dates[-1].day))
#ax.set_ylim(10 * math.floor(min(outcome) / 10.), 20 * math.ceil((1 + total) / 20.))
ax.plot(dates, mgs)
plt.xlabel('Date')
plt.ylabel('Messages')
plt.title('Messages per day')
plt.grid(True)
plt.savefig(filepath, dpi=250)
if show:
plt.show()
def popular_emojis():
print("Searching emojis...")
highpoints = re.compile(u'['
u'\U0001F300-\U0001F5FF'
u'\U0001F600-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\u2600-\u26FF\u2700-\u27BF]',
re.UNICODE)
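    # The character classes above cover Miscellaneous Symbols and Pictographs
    # (U+1F300-U+1F5FF), Emoticons (U+1F600-U+1F64F), Transport and Map Symbols
    # (U+1F680-U+1F6FF), Miscellaneous Symbols (U+2600-U+26FF) and Dingbats
    # (U+2700-U+27BF).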
emojis = {}
for mid, message in c.execute("""SELECT id, json FROM messages WHERE event="message";"""):
message = json.loads(message)
if "text" not in message:
continue
#if len(r) > 0:
#print(r, hex(ord(r[0])))
for ec in map(ord, highpoints.findall(message["text"])):
emojis[ec] = emojis.get(ec, 0) + 1
return sorted(emojis.items(), key=lambda x: x[1], reverse=True)[:20]
def activity_graph(filepath="activity.png", show=True):
print("Creating activity graph...")
messages = 24 * [0]
for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY id;"""):
message = json.loads(message)
timestamp = datetime.datetime.fromtimestamp(timestamp)
messages[timestamp.hour] += 1
fig, ax = plt.subplots()
fig.set_size_inches(14, 4)
ax.set_xlim(0, 23)
ax.bar(list(range(0, 24)), messages)
plt.xlabel('Hours')
plt.ylabel('Messages')
plt.title('Activity')
plt.grid(True)
plt.savefig(filepath, dpi=250)
if show:
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('name', type=str, help="Will be used as database name")
parser.add_argument('--no-population', action='store_true', help="Disable population graph")
parser.add_argument('--no-messages', action='store_true', help="Disable messages graph")
parser.add_argument('--no-activity', action='store_true', help="Disable activity graph")
parser.add_argument('--no-general', action='store_true', help="Disable general stats")
parser.add_argument('--no-talkers', action='store_true', help="Disable top talkers")
parser.add_argument('--no-topics', action='store_true', help="Disable topic list")
parser.add_argument('--no-words', action='store_true', help="Disable most commonly used words list")
parser.add_argument('--no-bots', action='store_true', help="Disable most commonly used bots/commands list")
parser.add_argument('--no-emojis', action='store_true', help="Disable most commonly used emojis list")
args = parser.parse_args()
    if len(args.name) < 3:
        raise SystemExit("Invalid name!")
conn = sqlite3.connect("%s.db" % args.name)
c = conn.cursor()
# Try to create a folder
try:
os.mkdir(args.name)
except OSError:
pass
if not args.no_population:
population_graph("%s/population.png" % args.name, show=False)
if not args.no_messages:
messages_graph("%s/messages.png" % args.name, show=False)
if not args.no_activity:
activity_graph("%s/activity.png" % args.name, show=False)
out = open("%s/index.html" % args.name, "w")
out.write("""<!DOCTYPE html><html lang="en"><head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>%s Telegram Statistics</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-2.2.4.min.js" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" crossorigin="anonymous"></script>
</head><body>
<div class="container">
""" % args.name)
out.write("<h1>%s Telegram Statistics</h1>" % args.name)
if not args.no_general or not args.no_talkers:
talkers = talker_stats()
if not args.no_population:
out.write("<h2>Members</h2>\n")
out.write("<img src='population.png' class='img-responsive' alt='Population over time'/>\n")
if not args.no_messages:
out.write("<h2>Messages per day</h2>\n")
out.write("<img src='messages.png' class='img-responsive' alt='Messages per day'/>\n")
if not args.no_activity:
out.write("<h2>Activity</h2>\n")
out.write("<img src='activity.png' class='img-responsive' alt=''/>\n")
if not args.no_general:
out.write("<h2>General numbers</h2>\n<table class='table tabler-striped'>\n")
top_rate, top_start, top_end = hourly_rate()
messages = 0
stickers = 0
photos = 0
for talker, stats in talkers:
messages += stats[0]
stickers += stats[2]
photos += stats[3]
out.write("<tr><td>Messages</td><td>%d</td></tr>\n" % messages)
out.write("<tr><td>Top speed</td><td>%d messages/hour (%s-%s)</td></tr>\n" % (top_rate, top_start.strftime("%d. %B %Y %I:%M"), top_end.strftime("%I:%M")))
out.write("<tr><td>Stickers</td><td>%d (%.1f%% of messages)</td></tr>\n" % (stickers, (100.0 * stickers) / messages))
out.write("<tr><td>Media</td><td>%d (%.1f%% of messages)</td></tr>\n" % (photos, (100.0 * photos) / messages))
#out.write("<tr><td>Videos</td><td>TODO</td></tr>\n")
#out.write("<tr><td>Audio</td><td>TODO</td></tr>\n")
out.write("</table>\n")
if not args.no_talkers:
out.write("<h2>Top 15 Talkers</h2>\n")
out.write("<ul class=\"nav nav-tabs\">" \
"<li class=\"active\"><a data-toggle=\"tab\" href=\"#all\">All-time</a></li>"\
"<li><a data-toggle=\"tab\" href=\"#week\">Last week</a></li>" \
"<li><a data-toggle=\"tab\" href=\"#month\">Last month</a></li>"\
"<li><a data-toggle=\"tab\" href=\"#year\">Last year</a></li></ul>" \
"<div class=\"tab-content\">\n")
timeranges = [
("all", "active", 3600),
("week", "", 7),
("month", "", 31),
("year", "", 365)
]
for trange, active, span in timeranges:
talks = talkers if trange == "all" else talker_stats(span)
top_talkers = sorted(talks, key=lambda x: x[1][0], reverse=True)[:15]
out.write("<div id=\"%s\" class=\"tab-pane %s\"><table class='table tabler-striped'>\n" % (trange, active))
out.write("\t<tr><th>#</th><th>Talker</th><th>Messages</th><th>Words</th><th>WPM</th><th>Stickers</th><th>Media</th></tr>\n")
pos = 1
for talker, (messages, words, stickers, photos) in top_talkers:
out.write("\t<tr><td>%d</td><td>%s</td><td>%d</td><td>%d</td><td>%.1f</td><td>%d</td><td>%d</td></tr>\n" % \
(pos, talker.replace("_", " "), messages, words, words / messages, stickers, photos))
pos += 1
out.write("</table></div>\n")
if not args.no_bots:
cmds, bots = bot_spammers()
out.write("<h2>Bot spammers</h2>\n<b>Most used bots:</b> ")
for bot, count in bots:
out.write("%s (%d), " % (bot, count))
out.write("\n<table class='table'><tr>\n")
for cmd, users in cmds:
out.write("<td><b>%s</b><br/>" % cmd)
for user, count in users:
out.write("%s (%d), <br/>" % (user.replace("_", " "), count))
out.write("</td>\n")
out.write("</tr></table>\n")
if not args.no_emojis:
out.write("<h2>Most popular emojis</h2>\n")
for emoji, count in popular_emojis():
out.write("<img width=\"32px\" src=\"http://emojione.com/wp-content/uploads/assets/emojis/%x.svg\" title=\"%d uses\"/>" % (emoji, count))
if not args.no_words:
out.write("<h2>100 most commonly used words</h2>\n<p>\n")
out.write(", ".join([ "%s (%d)" % c for c in most_commonly_used_words()[:100]]))
out.write("</p>\n")
if not args.no_topics:
out.write("<h2>Latest topics</h2>\n<table class='table tabler-striped'>\n")
for timestamp, title, changer in chat_renames()[:10]:
out.write("\t<tr><td>%s</td><td>Changed by %s (%s)</td></tr>\n" % (title, changer, timestamp.strftime("%d. %B %Y %I:%M")))
# TODO: Add deltatime
out.write("</table>\n")
out.write("<p>Generated %s with <a href='https://github.com/petrinm/tgstats'>tgstats</a></p>\n" % datetime.datetime.now().strftime("%d. %B %Y %H:%M"))
out.write("\n</div>\n</body></html>")
| mit |
samuel1208/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
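    # Binarized target: 1 means "not cover type 1 (spruce/fir)", 0 means cover
    # type 1, matching the binary task described in the module docstring.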
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
zorroblue/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
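# A quick, illustrative sanity check of the scaling-invariance claim in the
# docstring above (not part of the original example; the variable names here
# are arbitrary): the cosine distance between a waveform and a scaled copy of
# it is ~0, while the euclidean distance between them is not.
_v = np.array([[1.0, 2.0, 3.0]])
_scaled_pair = np.vstack([_v, 3.0 * _v])
assert np.isclose(pairwise_distances(_scaled_pair, metric="cosine")[0, 1], 0.0)
assert pairwise_distances(_scaled_pair, metric="euclidean")[0, 1] > 0.0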
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
lhilt/scipy | scipy/integrate/_ivp/ivp.py | 2 | 27082 | from __future__ import division, print_function, absolute_import
import inspect
import numpy as np
from .bdf import BDF
from .radau import Radau
from .rk import RK23, RK45, DOP853
from .lsoda import LSODA
from scipy.optimize import OptimizeResult
from .common import EPS, OdeSolution
from .base import OdeSolver
METHODS = {'RK23': RK23,
'RK45': RK45,
'DOP853': DOP853,
'Radau': Radau,
'BDF': BDF,
'LSODA': LSODA}
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
1: "A termination event occurred."}
class OdeResult(OptimizeResult):
pass
def prepare_events(events):
"""Standardize event functions and extract is_terminal and direction."""
if callable(events):
events = (events,)
if events is not None:
is_terminal = np.empty(len(events), dtype=bool)
direction = np.empty(len(events))
for i, event in enumerate(events):
try:
is_terminal[i] = event.terminal
except AttributeError:
is_terminal[i] = False
try:
direction[i] = event.direction
except AttributeError:
direction[i] = 0
else:
is_terminal = None
direction = None
return events, is_terminal, direction
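# Illustrative sketch (not used by the solver; the names below are arbitrary):
# a single event callable with a monkey-patched ``terminal`` attribute is
# normalized into a tuple, and a missing ``direction`` attribute falls back
# to 0.
def _prepare_events_example(t, y):
    return y[0]
_prepare_events_example.terminal = True
_ex_events, _ex_terminal, _ex_direction = prepare_events(_prepare_events_example)
assert _ex_terminal.tolist() == [True] and _ex_direction.tolist() == [0.0]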
def solve_event_equation(event, sol, t_old, t):
"""Solve an equation corresponding to an ODE event.
The equation is ``event(t, y(t)) = 0``, where ``y(t)`` is known from an
ODE solver using some sort of interpolation. It is solved by
`scipy.optimize.brentq` with xtol=rtol=4*EPS.
Parameters
----------
event : callable
Function ``event(t, y)``.
sol : callable
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
t_old, t : float
Previous and new values of time. They will be used as a bracketing
interval.
Returns
-------
root : float
Found solution.
"""
from scipy.optimize import brentq
return brentq(lambda t: event(t, sol(t)), t_old, t,
xtol=4 * EPS, rtol=4 * EPS)
def handle_events(sol, events, active_events, is_terminal, t_old, t):
"""Helper function to handle events.
Parameters
----------
sol : DenseOutput
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
events : list of callables, length n_events
Event functions with signatures ``event(t, y)``.
active_events : ndarray
Indices of events which occurred.
is_terminal : ndarray, shape (n_events,)
Which events are terminal.
t_old, t : float
Previous and new values of time.
Returns
-------
root_indices : ndarray
Indices of events whose functions crossed zero between `t_old` and `t`,
before a possible termination.
roots : ndarray
Values of t at which events occurred.
terminate : bool
Whether a terminal event occurred.
"""
roots = [solve_event_equation(events[event_index], sol, t_old, t)
for event_index in active_events]
roots = np.asarray(roots)
if np.any(is_terminal[active_events]):
if t > t_old:
order = np.argsort(roots)
else:
order = np.argsort(-roots)
active_events = active_events[order]
roots = roots[order]
t = np.nonzero(is_terminal[active_events])[0][0]
active_events = active_events[:t + 1]
roots = roots[:t + 1]
terminate = True
else:
terminate = False
return active_events, roots, terminate
def find_active_events(g, g_new, direction):
"""Find which event occurred during an integration step.
Parameters
----------
g, g_new : array_like, shape (n_events,)
Values of event functions at the current and next points.
direction : ndarray, shape (n_events,)
Event "direction" according to the definition in `solve_ivp`.
Returns
-------
active_events : ndarray
Indices of events which occurred during the step.
"""
g, g_new = np.asarray(g), np.asarray(g_new)
up = (g <= 0) & (g_new >= 0)
down = (g >= 0) & (g_new <= 0)
either = up | down
mask = (up & (direction > 0) |
down & (direction < 0) |
either & (direction == 0))
return np.nonzero(mask)[0]
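# Illustrative sketch (not used by the solver; the inputs are made up): with a
# single event value going from -1 to +1 over a step, the upward crossing is
# reported for direction 0 or +1 but filtered out for direction -1.
assert find_active_events([-1.0], [1.0], np.array([0])).tolist() == [0]
assert find_active_events([-1.0], [1.0], np.array([1])).tolist() == [0]
assert find_active_events([-1.0], [1.0], np.array([-1])).size == 0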
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
events=None, vectorized=False, args=None, **options):
"""Solve an initial value problem for a system of ODEs.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
Some of the solvers support integration in the complex domain, but note
that for stiff ODE solvers, the right-hand side must be
complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
To solve a problem in the complex domain, pass y0 with a complex data type.
Another option always available is to rewrite your problem for real and
imaginary parts separately.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar, and there are two options for the ndarray `y`:
It can either have shape (n,); then `fun` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then `fun`
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in `y`. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for stiff solvers).
t_span : 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0 : array_like, shape (n,)
Initial state. For problems in the complex domain, pass `y0` with a
complex data type (even if the initial value is purely real).
method : string or `OdeSolver`, optional
Integration method to use:
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
The error is controlled assuming accuracy of the fourth-order
method, but steps are taken using the fifth-order accurate
formula (local extrapolation is done). A quartic interpolation
polynomial is used for the dense output [2]_. Can be applied in
the complex domain.
* 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
is controlled assuming accuracy of the second-order method, but
steps are taken using the third-order accurate formula (local
extrapolation is done). A cubic Hermite polynomial is used for the
dense output. Can be applied in the complex domain.
* 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
Python implementation of the "DOP853" algorithm originally
written in Fortran [14]_. A 7-th order interpolation polynomial
accurate to 7-th order is used for the dense output.
Can be applied in the complex domain.
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
order 5 [4]_. The error is controlled with a third-order accurate
embedded formula. A cubic polynomial which satisfies the
collocation conditions is used for the dense output.
* 'BDF': Implicit multi-step variable-order (1 to 5) method based
on a backward differentiation formula for the derivative
approximation [5]_. The implementation follows the one described
in [6]_. A quasi-constant step scheme is used and accuracy is
enhanced using the NDF modification. Can be applied in the
complex domain.
* 'LSODA': Adams/BDF method with automatic stiffness detection and
switching [7]_, [8]_. This is a wrapper of the Fortran solver
from ODEPACK.
Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
for non-stiff problems and implicit methods ('Radau', 'BDF') for
stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
for solving with high precision (low values of `rtol` and `atol`).
If not sure, first try to run 'RK45'. If it makes unusually many
iterations, diverges, or fails, your problem is likely to be stiff and
you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
choice, but it might be somewhat less convenient to work with as it
wraps old Fortran code.
You can also pass an arbitrary class derived from `OdeSolver` which
implements the solver.
t_eval : array_like or None, optional
Times at which to store the computed solution, must be sorted and lie
within `t_span`. If None (default), use points selected by the solver.
dense_output : bool, optional
Whether to compute a continuous solution. Default is False.
events : callable, or list of callables, optional
Events to track. If None (default), no events will be tracked.
Each event occurs at the zeros of a continuous function of time and
state. Each function must have the signature ``event(t, y)`` and return
a float. The solver will find an accurate value of `t` at which
``event(t, y(t)) = 0`` using a root-finding algorithm. By default, all
zeros will be found. The solver looks for a sign change over each step,
so if multiple zero crossings occur within one step, events may be
missed. Additionally each `event` function might have the following
attributes:
terminal: bool, optional
Whether to terminate integration if this event occurs.
Implicitly False if not assigned.
direction: float, optional
Direction of a zero crossing. If `direction` is positive,
`event` will only trigger when going from negative to positive,
and vice versa if `direction` is negative. If 0, then either
direction will trigger event. Implicitly 0 if not assigned.
You can assign attributes like ``event.terminal = True`` to any
function in Python.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
args : tuple, optional
Additional arguments to pass to the user-defined functions. If given,
the additional arguments are passed to all user-defined functions.
So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
then `jac` (if given) and any event functions must have the same
signature, and `args` must be a tuple of length 3.
options
Options passed to a chosen solver. All options available for already
implemented solvers are listed below.
first_step : float or None, optional
Initial step size. Default is `None` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float or array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : array_like, sparse_matrix, callable or None, optional
Jacobian matrix of the right-hand side of the system with respect
to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
Jacobian matrix has shape (n, n) and its element (i, j) is equal to
``d f_i / d y_j``. There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant. Not supported by 'LSODA'.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : array_like, sparse matrix or None, optional
Defines a sparsity structure of the Jacobian matrix for a finite-
difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few
non-zero elements in *each* row, providing the sparsity structure
will greatly speed up the computations [10]_. A zero entry means that
a corresponding element in the Jacobian is always zero. If None
(default), the Jacobian is assumed to be dense.
Not supported by 'LSODA', see `lband` and `uband` instead.
lband, uband : int or None, optional
Parameters defining the bandwidth of the Jacobian for the 'LSODA'
method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
Default is None. Setting these requires your jac routine to return the
Jacobian in the packed format: the returned array must have ``n``
columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
The same format is used in `scipy.linalg.solve_banded` (check for an
illustration). These parameters can be also used with ``jac=None`` to
reduce the number of Jacobian elements estimated by finite differences.
min_step : float, optional
The minimum allowed step size for 'LSODA' method.
By default `min_step` is zero.
Returns
-------
Bunch object with the following fields defined:
t : ndarray, shape (n_points,)
Time points.
y : ndarray, shape (n, n_points)
Values of the solution at `t`.
sol : `OdeSolution` or None
Found solution as `OdeSolution` instance; None if `dense_output` was
set to False.
t_events : list of ndarray or None
Contains, for each event type, an array of times at which an event of
that type was detected. None if `events` was None.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
status : int
Reason for algorithm termination:
* -1: Integration step failed.
* 0: The solver successfully reached the end of `tspan`.
* 1: A termination event occurred.
message : string
Human-readable description of the termination reason.
success : bool
True if the solver reached the interval end or a termination event
occurred (``status >= 0``).
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
.. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
.. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [5] `Backward Differentiation Formula
<https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
on Wikipedia.
.. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [8] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
.. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
Wikipedia.
.. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
.. [11] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
.. [12] `Lotka-Volterra equations
<https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
on Wikipedia.
.. [13] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.
.. [14] `Page with original Fortran code of DOP853
<http://www.unige.ch/~hairer/software.html>`_.
Examples
--------
Basic exponential decay showing automatically chosen time points.
>>> from scipy.integrate import solve_ivp
>>> def exponential_decay(t, y): return -0.5 * y
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
>>> print(sol.t)
[ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806
8.33328988 10. ]
>>> print(sol.y)
[[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
0.03107158 0.01350781]
[4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091
0.06214316 0.02701561]
[8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181
0.12428631 0.05403123]]
Specifying points where the solution is desired.
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
... t_eval=[0, 1, 2, 4, 10])
>>> print(sol.t)
[ 0 1 2 4 10]
>>> print(sol.y)
[[2. 1.21305369 0.73534021 0.27066736 0.01350938]
[4. 2.42610739 1.47068043 0.54133472 0.02701876]
[8. 4.85221478 2.94136085 1.08266944 0.05403753]]
Cannon fired upward with terminal event upon impact. The ``terminal`` and
``direction`` fields of an event are applied by monkey patching a function.
Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
at position 0 with velocity +10. Note that the integration never reaches
t=100 because the event is terminal.
>>> def upward_cannon(t, y): return [y[1], -0.5]
>>> def hit_ground(t, y): return y[0]
>>> hit_ground.terminal = True
>>> hit_ground.direction = -1
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
>>> print(sol.t_events)
[array([40.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
Use `dense_output` and `events` to find position, which is 100, at the apex
of the cannonball's trajectory. Apex is not defined as terminal, so both
apex and hit_ground are found. There is no information at t=20, so the sol
attribute is used to evaluate the solution. The sol attribute is returned
by setting ``dense_output=True``.
>>> def apex(t,y): return y[1]
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
... events=(hit_ground, apex), dense_output=True)
>>> print(sol.t_events)
[array([40.]), array([20.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
>>> print(sol.sol(sol.t_events[1][0]))
[100. 0.]
As an example of a system with additional parameters, we'll implement
the Lotka-Volterra equations [12]_.
>>> def lotkavolterra(t, z, a, b, c, d):
... x, y = z
... return [a*x - b*x*y, -c*y + d*x*y]
...
We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
argument.
>>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
... dense_output=True)
Compute a dense solution and plot it.
>>> t = np.linspace(0, 15, 300)
>>> z = sol.sol(t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, z.T)
>>> plt.xlabel('t')
>>> plt.legend(['x', 'y'], shadow=True)
>>> plt.title('Lotka-Volterra System')
>>> plt.show()
"""
if method not in METHODS and not (
inspect.isclass(method) and issubclass(method, OdeSolver)):
raise ValueError("`method` must be one of {} or OdeSolver class."
.format(METHODS))
t0, tf = float(t_span[0]), float(t_span[1])
if args is not None:
# Wrap the user's fun (and jac, if given) in lambdas to hide the
# additional parameters. Pass in the original fun as a keyword
# argument to keep it in the scope of the lambda.
fun = lambda t, x, fun=fun: fun(t, x, *args)
jac = options.get('jac')
if callable(jac):
options['jac'] = lambda t, x: jac(t, x, *args)
if t_eval is not None:
t_eval = np.asarray(t_eval)
if t_eval.ndim != 1:
raise ValueError("`t_eval` must be 1-dimensional.")
if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
raise ValueError("Values in `t_eval` are not within `t_span`.")
d = np.diff(t_eval)
if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
raise ValueError("Values in `t_eval` are not properly sorted.")
if tf > t0:
t_eval_i = 0
else:
# Make order of t_eval decreasing to use np.searchsorted.
t_eval = t_eval[::-1]
# This will be an upper bound for slices.
t_eval_i = t_eval.shape[0]
if method in METHODS:
method = METHODS[method]
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
if t_eval is None:
ts = [t0]
ys = [y0]
elif t_eval is not None and dense_output:
ts = []
ti = [t0]
ys = []
else:
ts = []
ys = []
interpolants = []
events, is_terminal, event_dir = prepare_events(events)
if events is not None:
if args is not None:
# Wrap user functions in lambdas to hide the additional parameters.
# The original event function is passed as a keyword argument to the
# lambda to keep the original function in scope (i.e. avoid the
# late binding closure "gotcha").
events = [lambda t, x, event=event: event(t, x, *args)
for event in events]
g = [event(t0, y0) for event in events]
t_events = [[] for _ in range(len(events))]
else:
t_events = None
status = None
while status is None:
message = solver.step()
if solver.status == 'finished':
status = 0
elif solver.status == 'failed':
status = -1
break
t_old = solver.t_old
t = solver.t
y = solver.y
if dense_output:
sol = solver.dense_output()
interpolants.append(sol)
else:
sol = None
if events is not None:
g_new = [event(t, y) for event in events]
active_events = find_active_events(g, g_new, event_dir)
if active_events.size > 0:
if sol is None:
sol = solver.dense_output()
root_indices, roots, terminate = handle_events(
sol, events, active_events, is_terminal, t_old, t)
for e, te in zip(root_indices, roots):
t_events[e].append(te)
if terminate:
status = 1
t = roots[-1]
y = sol(t)
g = g_new
if t_eval is None:
ts.append(t)
ys.append(y)
else:
# The value in t_eval equal to t will be included.
if solver.direction > 0:
t_eval_i_new = np.searchsorted(t_eval, t, side='right')
t_eval_step = t_eval[t_eval_i:t_eval_i_new]
else:
t_eval_i_new = np.searchsorted(t_eval, t, side='left')
# It has to be done with two slice operations, because
# you can't slice to 0-th element inclusive using backward
# slicing.
t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
if t_eval_step.size > 0:
if sol is None:
sol = solver.dense_output()
ts.append(t_eval_step)
ys.append(sol(t_eval_step))
t_eval_i = t_eval_i_new
if t_eval is not None and dense_output:
ti.append(t)
message = MESSAGES.get(status, message)
if t_events is not None:
t_events = [np.asarray(te) for te in t_events]
if t_eval is None:
ts = np.array(ts)
ys = np.vstack(ys).T
else:
ts = np.hstack(ts)
ys = np.hstack(ys)
if dense_output:
if t_eval is None:
sol = OdeSolution(ts, interpolants)
else:
sol = OdeSolution(ti, interpolants)
else:
sol = None
return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, nfev=solver.nfev,
njev=solver.njev, nlu=solver.nlu, status=status,
message=message, success=status >= 0)
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
YzPaul3/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_smallcatRF.py | 8 | 1954 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def smallcatRF():
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
#Log.info("Importing alphabet_cattest.csv data...\n")
alphabet = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"))
alphabet["y"] = alphabet["y"].asfactor()
#Log.info("Summary of alphabet_cattest.csv from H2O:\n")
#alphabet.summary()
# Prepare data for scikit use
trainData = np.loadtxt(pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"), delimiter=',', skiprows=1,
converters={0:lambda s: ord(s.decode().split("\"")[1])})
trainDataResponse = trainData[:,1]
trainDataFeatures = trainData[:,0]
  # Train H2O RF Model:
  #Log.info("H2O RF (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
rf_h2o = H2ORandomForestEstimator(ntrees=1, max_depth=1, nbins=100)
rf_h2o.train(x='X', y="y", training_frame=alphabet)
  # Train scikit RF Model:
  # Log.info("scikit RF with same parameters:")
rf_sci = ensemble.RandomForestClassifier(n_estimators=1, criterion='entropy', max_depth=1)
rf_sci.fit(trainDataFeatures[:,np.newaxis],trainDataResponse)
# h2o
rf_perf = rf_h2o.model_performance(alphabet)
auc_h2o = rf_perf.auc()
# scikit
auc_sci = roc_auc_score(trainDataResponse, rf_sci.predict_proba(trainDataFeatures[:,np.newaxis])[:,1])
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
pyunit_utils.standalone_test(smallcatRF)
else:
smallcatRF()
| apache-2.0 |
frederick623/pb | ul_automation/FeeCalc.py | 2 | 5218 | import requests
import re
import decimal
import math
import os
import fnmatch
import pandas as pd
import codecs
import traceback
class FeeCalc:
def __init__(self):
self.PATH_DICT = {
"hkg_etf_url" : "https://www.hkex.com.hk/eng/etfrc/ListOfAllETF/ETFList.csv",
"hkg_lni_url" : "https://www.hkex.com.hk/eng/liprc/ListOfAllLIP/LIPList.csv",
"hkg_etf_file" : "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\ETFList.csv",
"hkg_lni_file" : "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\LIPList.csv",
"pbt_path" : "S:\\Prime Brokerage (PB)\\Tools\\ULLink Tool",
"pbt_name" : "pb_comm_matrix.xlsx",
}
self.HKG_EXM_ARR = self.hkg_stockcode_wo_stamp()
self.COMM_DF = self.init_contract_comm()
return
def round_half_up(self, val, digit):
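  # Round `val` to `digit` decimal places, rounding halves up (e.g. 0.125 -> 0.13
  # at 2 digits) instead of Python 3's round-half-to-even; the intermediate
  # round/str pass damps binary floating-point noise before flooring.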
tmp_val = round(val, digit+4)
return math.floor(float(str(tmp_val))*pow(10, digit)+0.5)/pow(10, digit)
def init_contract_comm(self):
pbt_file = os.path.join(self.PATH_DICT["pbt_path"], self.PATH_DICT["pbt_name"])
comm_xlsx = pd.ExcelFile(pbt_file)
dma_comm = comm_xlsx.parse('matrix')
dma_comm.columns = [re.sub(r"[\*\.#/\$%\"\(\)& \_]", "", c) for c in dma_comm.columns]
dma_comm = dma_comm.fillna(0)
return dma_comm
def parse_hkex_url(self, url, regex_pattern, split_by, nth_ele, encoding, io_file):
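  # Try to download the latest list from HKEX and cache it to `io_file`; if the
  # request fails, fall back to the previously cached copy on disk. The regex
  # picks out quoted 5-digit stock codes, which are stripped and cast to int.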
arr = []
reader = []
text = ''
try:
response = requests.get(url)
text = response.content.decode(encoding)
fp = open(io_file, 'w', encoding=encoding, newline=u'\n')
fp.write(text)
except:
print ("Connection failed. Retrieve old file instead")
# traceback.print_exc()
fp = codecs.open(io_file, 'r', encoding=encoding)
for x in fp:
text += x
arr = [ int((ele.split(split_by))[nth_ele].strip("\"")) for ele in re.findall(regex_pattern, text) ]
return arr
def hkg_stockcode_wo_stamp(self):
eqt_arr = []
etf_arr = self.parse_hkex_url(self.PATH_DICT["hkg_etf_url"], "\"\d\d\d\d\d\"", '\t', 0, 'utf_16_le', self.PATH_DICT["hkg_etf_file"])
lni_arr = self.parse_hkex_url(self.PATH_DICT["hkg_lni_url"], "\"\d\d\d\d\d\"", '\t', 0, 'utf_16_le', self.PATH_DICT["hkg_lni_file"])
return etf_arr + lni_arr
def fee_calc_hk(self, contract_no, price, qty, stockcode, handlinginstruction):
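  # HK cash-equity fees as implemented below: ad-valorem commission from the
  # contract/handling-instruction matrix, 0.005% exchange trading fee, 0.0027%
  # SFC transaction levy, and 0.1% stamp duty rounded up to the nearest dollar
  # (waived for ETF and leveraged & inverse products).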
notional = float(price)*abs(float(qty))
comm_rate = self.COMM_DF.loc[(self.COMM_DF.ContractsNo==contract_no) & (self.COMM_DF.HandlingInstruction==handlinginstruction), "HK"].iloc[0]
commission = self.round_half_up(notional*comm_rate, 2)
trading_fee = self.round_half_up(notional*0.00005, 2)
transaction_levy = self.round_half_up(notional*0.000027, 2)
stamp_duty = 0 if int(stockcode) in self.HKG_EXM_ARR else math.ceil(notional*0.001)
return commission, trading_fee, transaction_levy, stamp_duty
def fee_calc_jp(self, contract_no, price, qty, stockcode, handlinginstruction):
notional = float(price)*abs(float(qty))
comm_rate = self.COMM_DF.loc[(self.COMM_DF.ContractsNo==contract_no) & (self.COMM_DF.HandlingInstruction==handlinginstruction), "Japan"].iloc[0]
commission = int(self.round_half_up(notional*comm_rate, 0))
trading_fee = 0
transaction_levy = 0
stamp_duty = 0
return commission, trading_fee, transaction_levy, stamp_duty
def fee_calc_sc(self, contract_no, price, qty, stockcode, handlinginstruction):
notional = float(price)*abs(float(qty))
comm_rate = self.COMM_DF.loc[(self.COMM_DF.ContractsNo==contract_no) & (self.COMM_DF.HandlingInstruction==handlinginstruction), "StockConnect"].iloc[0]
commission = notional*comm_rate
trading_fee = 0
transaction_levy = self.round_half_up(notional*0.0001087, 2)
stamp_duty = self.round_half_up(notional*0.001, 2) if float(qty) < 0 else 0
return commission, trading_fee, transaction_levy, stamp_duty
def fee_calc_us(self, contract_no, price, qty, stockcode, handlinginstruction):
notional = float(price)*abs(float(qty))
comm_rate = self.COMM_DF.loc[(self.COMM_DF.ContractsNo==contract_no) & (self.COMM_DF.HandlingInstruction==handlinginstruction), "US"].iloc[0]
commission = int(abs(float(qty))*comm_rate)
trading_fee = min(0.01, 0.0000231*notional) + min(5.95, min(0.01, 0.000119*abs(float(qty))))
transaction_levy = 0
stamp_duty = 0
return commission, trading_fee, transaction_levy, stamp_duty
def fee_calc(self, contract_no, price, qty, stockcode, market, handlinginstruction):
if market == "XHKG":
return self.fee_calc_hk(contract_no, price, qty, stockcode, handlinginstruction)
elif market == "XTKS":
return self.fee_calc_jp(contract_no, price, qty, stockcode, handlinginstruction)
elif market == "SHSC" or market == "SZSC" or market == "XSSC" or market == "XSEC":
return self.fee_calc_sc(contract_no, price, qty, stockcode, handlinginstruction)
elif market == "XNYS" or market == "XNGS":
return self.fee_calc_us(contract_no, price, qty, stockcode, handlinginstruction)
else:
return 0, 0, 0, 0
def test(self):
print (self.fee_calc("DPS_20171204001", "1.17", "22000", "420", "XHKG", "DSA"))
return
# if __name__ == "__main__":
# print ("Fee Module")
# try:
# x = FeeCalc()
# x.test()
# except KeyboardInterrupt:
# print ("Ctrl+C pressed. Stopping...") | apache-2.0 |
smarden1/airflow | airflow/hooks/sqlite_hook.py | 3 | 2945 | import logging
import sqlite3
from airflow.hooks.base_hook import BaseHook
class SqliteHook(BaseHook):
"""
Interact with SQLite.
"""
def __init__(
self, sqlite_conn_id='sqlite_default'):
self.sqlite_conn_id = sqlite_conn_id
def get_conn(self):
"""
Returns a sqlite connection object
"""
conn = self.get_connection(self.sqlite_conn_id)
conn = sqlite3.connect(conn.host)
return conn
def run(self, sql):
"""
Runs a command
>>> h = SqliteHook()
>>> sql = "CREATE TABLE IF NOT EXISTS test_table (i INTEGER);"
>>> h.run(sql)
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute(sql)
conn.commit()
cur.close()
conn.close()
def insert_rows(self, table, rows, target_fields=None):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
>>> h = SqliteHook()
>>> h.insert_rows('test_table', [[1]])
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
if isinstance(cell, basestring):
l.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
l.append('NULL')
else:
l.append(str(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
conn.commit()
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
def get_records(self, sql):
"""
Executes the sql and returns a set of records.
>>> h = SqliteHook()
>>> sql = "SELECT * FROM test_table WHERE i=1 LIMIT 1;"
>>> h.get_records(sql)
[(1,)]
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_pandas_df(self, sql):
"""
Executes the sql and returns a pandas dataframe
>>> h = SqliteHook()
>>> sql = "SELECT * FROM test_table WHERE i=1 LIMIT 1;"
>>> h.get_pandas_df(sql)
i
0 1
"""
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn)
conn.close()
return df
| apache-2.0 |
nhejazi/project-gamma | code/network_analysis.py | 2 | 9789 | """
Script for inter-network and intra-network connectivity analysis.
"""
from __future__ import division
import project_config
from conv import conv_target_non_target, conv_std
from stimuli_revised import events2neural_std
from gaussian_filter import spatial_smooth
from general_utils import prepare_standard_img, prepare_mask, prepare_standard_data, form_cond_filepath
from os.path import join
from connectivity_utils import c_between, c_within, permute
import numpy as np
import os
import math
import nibabel as nib
import numpy.linalg as npl
import roi_extraction
from ggplot import *
import pandas as pd
import random
def create_f (task, dic, namelist, find_nw):
con_group = "con"
scz_group = 'scz'
sub_dic_con = dic[task][con_group]
sub_dic_scz = dic[task][scz_group]
corrs = np.array([])
network = np.array([])
for name in namelist:
corrs = np.append(corrs, np.ravel(sub_dic_con[name]))
network =np.append(network, [find_nw[name] + ",con"]*len(np.ravel(sub_dic_con[name])))
corrs = np.append(corrs, np.ravel(sub_dic_scz[name]))
network =np.append(network, [find_nw[name] + ",scz"]*len(np.ravel(sub_dic_scz[name])))
data_f = pd.DataFrame(corrs)
data_f['networks']=network
data_f.columns = ['corrs','networks']
return data_f
def generate_connectivity_results(connectivity_results, output_filename):
find_nw = {}
find_nw['Cerebellar-Default']='bDMN-CER'
find_nw['Cerebellar-Cingulo-Opercular']='bCO-CER'
find_nw['Cingulo-Opercular-Default']='bDMN-CO'
find_nw['Default']='wDMN'
find_nw['Cerebellar-Fronto-Parietal']='bFP-CER'
find_nw['Cingulo-Opercular']='wCO'
find_nw['Default-Fronto-Parietal']='bDMN-FP'
find_nw['Fronto-Parietal']='wFP'
find_nw['Cerebellar']='wCER'
find_nw['Cingulo-Opercular-Fronto-Parietal']='bFP-CO'
between_namelist = ['Cerebellar-Default','Cerebellar-Cingulo-Opercular','Cingulo-Opercular-Default'
,'Cerebellar-Fronto-Parietal','Default-Fronto-Parietal','Cingulo-Opercular-Fronto-Parietal']
within_namelist = ['Default','Fronto-Parietal','Cerebellar','Cingulo-Opercular']
f_within = create_f ('003', connectivity_results, within_namelist, find_nw)
plt1 = ggplot(f_within, aes(x='corrs', y='networks')) +\
geom_boxplot()+\
ggtitle("Within-Network Correlations in CON and SCZ Group")+\
xlab("Correlation")+\
ylab("Networks")+\
scale_x_continuous(limits=(-1.0, 1.0))
ggsave(plt1, os.path.join(output_filename, "within_network_connectivity_plot.png"))
f_between = create_f('003', connectivity_results, between_namelist, find_nw)
plt2 = ggplot(f_between, aes(x='corrs', y='networks')) +\
geom_boxplot()+\
ggtitle("Between-Network Correlations in CON and SCZ Group")+\
xlab("Correlation")+\
ylab("Networks")+\
scale_x_continuous(limits=(-1.0, 1.0))
ggsave(plt2, os.path.join(output_filename, "inter_network_connectivity_plot.png"))
def expand_dic(dic, mm_to_vox, roi_extractor):
expanded_dic = {}
for i in dic.keys():
expanded_dic[i] = {}
for roi_name in dic[i].keys():
expanded_dic[i][roi_name] = roi_extractor.get_voxels(mm_to_vox, dic[i][roi_name])
return expanded_dic
def preprocessing_pipeline(subject_num, task_num, standard_source_prefix, cond_filepath_prefix):
img = prepare_standard_img(subject_num, task_num, standard_source_prefix)
data = img.get_data()[..., 5:]
n_trs = data.shape[-1] + 5
cond_filename_003 = form_cond_filepath(subject_num, task_num, "003", cond_filepath_prefix)
cond_filename_005 = form_cond_filepath(subject_num, task_num, "005", cond_filepath_prefix)
cond_filename_001 = form_cond_filepath(subject_num, task_num, "001", cond_filepath_prefix)
cond_filename_004 = form_cond_filepath(subject_num, task_num, "004", cond_filepath_prefix)
cond_filename_007 = form_cond_filepath(subject_num, task_num, "007", cond_filepath_prefix)
target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(n_trs, cond_filename_003, cond_filename_007, TR, tr_divs = 100.0)
target_convolved, nontarget_convolved, error_convolved = target_convolved[5:], nontarget_convolved[5:], error_convolved[5:]
block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]
block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]
linear_drift = np.linspace(-1, 1, n_trs)
qudratic_drift = linear_drift ** 2
qudratic_drift -= np.mean(qudratic_drift)
linear_drift = linear_drift[5:]
qudratic_drift = qudratic_drift[5:]
in_brain_mask, _ = prepare_mask(data, CUTOFF)
pad_thickness = 2.0
sigma = 2.0
b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
in_brain_tcs = b_vols[in_brain_mask]
Y = in_brain_tcs.T
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
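  # U[:, 0] is the dominant temporal component from the SVD of the
  # (time x time) covariance of the demeaned in-brain data; it is added below
  # as a nuisance regressor (column index 8 of the design matrix X).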
n_betas = 10
X = np.ones((n_trs - 5, n_betas))
X[:, 0] = target_convolved
X[:, 1] = nontarget_convolved
X[:, 2] = error_convolved
X[:, 3] = block_regressor
X[:, 4] = block_start_cues
X[:, 5] = block_end_cues
X[:, 6] = linear_drift
X[:, 7] = qudratic_drift
X[:, 8] = U[:,0]
# 9th column is the intercept
B = npl.pinv(X).dot(Y)
residuals = in_brain_tcs - X.dot(B).T
B[(3,4,5,6,7,8,9),:] = 0
# project Y onto the functional betas
functional_Y = X.dot(B).T
b_vols = np.zeros((data.shape))
b_vols[in_brain_mask, :] = functional_Y + residuals
return b_vols, img, in_brain_mask
def subject_c_values(img, data, dist_from_center, dic, in_brain_mask):
mm_to_vox = npl.inv(img.affine)
roi_extractor = roi_extraction.SphereExtractor(in_brain_mask, dist_from_center)
expanded_dic = expand_dic(dic, mm_to_vox, roi_extractor)
mean_c_values = c_within(data, expanded_dic)
mean_c_values.update(c_between(data, expanded_dic))
return mean_c_values
def group_c_values(standard_group_source_prefix, cond_filepath_prefix, dist_from_center, dic, group_info):
task_nums = ("001", "002", "003")
# store layout
# level 1: task (0-back, 1-back, 2-back)
# level 2: group name (CON, SCZ)
# level 3: network name
# level 4: a list of ROI-ROI correlations
c_values_store = {"001":{"con":{}, "scz":{}},
"002":{"con":{}, "scz":{}},
"003":{"con":{}, "scz":{}}}
for group, subject_nums in group_info.items():
for sn in subject_nums:
for tn in task_nums:
data, img, in_brain_mask = preprocessing_pipeline(sn, tn, standard_group_source_prefix, cond_filepath_prefix)
mean_c_values_per_net_pair = subject_c_values(img, data, dist_from_center, dic, in_brain_mask)
for network_pair_name, c_value in mean_c_values_per_net_pair.items():
group_name = "con" if group in ("fmri_con", "fmri_con_sib") else "scz"
if network_pair_name not in c_values_store[tn][group_name]:
c_values_store[tn][group_name][network_pair_name] = [c_value]
else:
c_values_store[tn][group_name][network_pair_name].append(c_value)
return c_values_store
if __name__ == "__main__":
dic = roi_extraction.dic
dist_from_center = 4
CUTOFF = project_config.MNI_CUTOFF
TR = project_config.TR
standard_group_source_prefix = os.path.join(os.path.dirname(__file__), "..", "data", "preprocessed")
cond_filepath_prefix = os.path.join(os.path.dirname(__file__), "..", "data", "condition_files")
output_filename = os.path.join(os.path.dirname(__file__), "..", "results")
small_group_info = {"fmri_con":("011", "012", "015", "035", "036", "037"),
"fmri_con_sib":("010", "013", "014", "021", "022", "038"),
"fmri_scz":("007", "009", "017", "031"),
"fmri_scz_sib":("006", "008", "018", "024")}
c_values_store = group_c_values(standard_group_source_prefix, cond_filepath_prefix, dist_from_center, dic, small_group_info)
generate_connectivity_results(c_values_store, output_filename)
# change target r-values into list format
con_dmn_cer = np.ravel(c_values_store["003"]["con"]["Cerebellar-Default"]).tolist()
scz_dmn_cer = np.ravel(c_values_store["003"]["scz"]["Cerebellar-Default"]).tolist()
con_cer_co = np.ravel(c_values_store["003"]["con"]["Cerebellar-Cingulo-Opercular"]).tolist()
scz_cer_co = np.ravel(c_values_store["003"]["scz"]["Cerebellar-Cingulo-Opercular"]).tolist()
con_dmn_co = np.ravel(c_values_store["003"]["con"]["Cingulo-Opercular-Default"]).tolist()
scz_dmn_co = np.ravel(c_values_store["003"]["scz"]["Cingulo-Opercular-Default"]).tolist()
con_fp_cer = np.ravel(c_values_store["003"]["con"]["Cerebellar-Fronto-Parietal"]).tolist()
scz_fp_cer = np.ravel(c_values_store["003"]["scz"]["Cerebellar-Fronto-Parietal"]).tolist()
con_dmn_fp = np.ravel(c_values_store["003"]["con"]["Default-Fronto-Parietal"]).tolist()
scz_dmn_fp = np.ravel(c_values_store["003"]["scz"]["Default-Fronto-Parietal"]).tolist()
con_fp_co = np.ravel(c_values_store["003"]["con"]["Cingulo-Opercular-Fronto-Parietal"]).tolist()
scz_fp_co = np.ravel(c_values_store["003"]["scz"]["Cingulo-Opercular-Fronto-Parietal"]).tolist()
# perform permutation test
dmn_cer_p_value = permute(scz_dmn_cer,con_dmn_cer)
cer_co_p_value = permute(scz_cer_co,con_cer_co)
dmn_co_p_value = permute(scz_dmn_co,con_dmn_co)
fp_cer_p_value = permute(scz_fp_cer,con_fp_cer)
dmn_fp_p_value = permute(scz_dmn_fp,con_dmn_fp)
fp_co_p_value = permute(scz_fp_co,con_fp_co)
permute_results = {"bDMN-CER": dmn_cer_p_value, "bCO-CER": cer_co_p_value, "bDMN-CO": dmn_co_p_value, "bFP-CER": fp_cer_p_value, "bDMN-FP": dmn_fp_p_value, "bFP-CO": fp_co_p_value}
permute_pd = pd.DataFrame(permute_results, index=["Permution Test P Values"])
permute_pd.to_csv(os.path.join(output_filename, "connectivity_permutation_results.csv"))
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/metrics/pairwise.py | 49 | 44088 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
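# Illustrative cross-check (not part of the scikit-learn API; the array below
# is made up): on a small dense input the dot-product formulation above agrees
# with scipy's cdist up to floating-point rounding.
_euclidean_check = np.array([[0., 1.], [1., 1.], [2., 3.]])
assert np.allclose(euclidean_distances(_euclidean_check),
                   distance.cdist(_euclidean_check, _euclidean_check))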
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
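    Examples
    --------
    A small toy example (input values chosen only for illustration):
    >>> from sklearn.metrics.pairwise import pairwise_distances_argmin_min
    >>> X = [[0, 0], [1, 1]]
    >>> Y = [[0, 1], [2, 2]]
    >>> argmin, dist = pairwise_distances_argmin_min(X, Y)
    >>> argmin
    array([0, 0])
    >>> dist
    array([ 1.,  1.])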
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like
        Array containing points. Shape (n_samples1, n_features).
    Y : array-like
        Array containing points. Shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
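    Examples
    --------
    A small toy example (input values chosen only for illustration):
    >>> from sklearn.metrics.pairwise import pairwise_distances_argmin
    >>> pairwise_distances_argmin([[0, 0], [1, 1]], [[0, 1], [2, 2]])
    array([0, 0])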
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
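    Examples
    --------
    A small toy example with two orthogonal vectors:
    >>> from sklearn.metrics.pairwise import cosine_distances
    >>> cosine_distances([[1, 0], [0, 1]])
    array([[ 0.,  1.],
           [ 1.,  0.]])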
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
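    Examples
    --------
    A small toy example (distances are computed row by row):
    >>> from sklearn.metrics.pairwise import paired_euclidean_distances
    >>> paired_euclidean_distances([[0, 0], [1, 1]], [[0, 1], [4, 5]])
    array([ 1.,  5.])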
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared euclidean distance
    if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
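    Examples
    --------
    A small toy example (the Gram matrix of plain dot products):
    >>> from sklearn.metrics.pairwise import linear_kernel
    >>> X = [[0., 1.], [1., 1.]]
    >>> linear_kernel(X, X)
    array([[ 1.,  1.],
           [ 1.,  2.]])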
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
    degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
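    Examples
    --------
    A small toy example with two orthogonal samples:
    >>> from sklearn.metrics.pairwise import cosine_similarity
    >>> cosine_similarity([[1, 0], [0, 1]])
    array([[ 1.,  0.],
           [ 0.,  1.]])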
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
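    Examples
    --------
    A small toy example (any of the metrics listed above could be used):
    >>> from sklearn.metrics.pairwise import pairwise_distances
    >>> X = [[0, 1], [1, 1]]
    >>> pairwise_distances(X, metric='manhattan')
    array([[ 0.,  1.],
           [ 1.,  0.]])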
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
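    Examples
    --------
    A small toy example (with metric='linear' this reduces to linear_kernel):
    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> X = [[0., 1.], [1., 1.]]
    >>> pairwise_kernels(X, metric='linear')
    array([[ 1.,  1.],
           [ 1.,  2.]])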
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
Visdoom/psignifit-4.0 | psignifit/psigniplot.py | 1 | 25546 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 17:34:08 2016
@author: original Heiko, ported by Ole and Sophie
"""
import numpy as np
from scipy.signal import convolve as convn
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib import cm
from .private.marginalize import marginalize
from .private.utils import my_norminv
def plotPsych(result,
dataColor = [0, 105./255, 170./255],
plotData = True,
lineColor = [0, 0, 0],
lineWidth = 2,
xLabel = 'Stimulus Level',
yLabel = 'Proportion Correct',
labelSize = 15,
fontSize = 10,
fontName = 'Helvetica',
tufteAxis = False,
plotAsymptote = True,
plotThresh = True,
aspectRatio = False,
extrapolLength = .2,
CIthresh = False,
dataSize = 0,
axisHandle = None):
"""
This function produces a plot of the fitted psychometric function with
the data.
"""
fit = result['Fit']
data = result['data']
options = result['options']
if axisHandle == None: axisHandle = plt.gca()
try:
plt.axes(axisHandle)
except TypeError:
raise ValueError('Invalid axes handle provided to plot in.')
if np.isnan(fit[3]): fit[3] = fit[2]
if data.size == 0: return
if dataSize == 0: dataSize = 10000. / np.sum(data[:,2])
if 'nAFC' in options['expType']:
ymin = 1. / options['expN']
ymin = min([ymin, min(data[:,1] / data[:,2])])
else:
ymin = 0
# PLOT DATA
holdState = plt.ishold()
if not holdState: plt.cla()
plt.hold(True)
xData = data[:,0]
if plotData:
yData = data[:,1] / data[:,2]
markerSize = np.sqrt(dataSize/2 * data[:,2])
for i in range(len(xData)):
plt.plot(xData[i], yData[i], '.', ms=markerSize[i], c=dataColor, clip_on=False)
# PLOT FITTED FUNCTION
if options['logspace']:
xMin = np.log(min(xData))
xMax = np.log(max(xData))
xLength = xMax - xMin
x = np.exp(np.linspace(xMin, xMax, num=1000))
xLow = np.exp(np.linspace(xMin - extrapolLength*xLength, xMin, num=100))
xHigh = np.exp(np.linspace(xMax, xMax + extrapolLength*xLength, num=100))
axisHandle.set_xscale('log')
else:
xMin = min(xData)
xMax = max(xData)
xLength = xMax - xMin
x = np.linspace(xMin, xMax, num=1000)
xLow = np.linspace(xMin - extrapolLength*xLength, xMin, num=100)
xHigh = np.linspace(xMax, xMax + extrapolLength*xLength, num=100)
fitValuesLow = (1 - fit[2] - fit[3]) * options['sigmoidHandle'](xLow, fit[0], fit[1]) + fit[3]
fitValuesHigh = (1 - fit[2] - fit[3]) * options['sigmoidHandle'](xHigh, fit[0], fit[1]) + fit[3]
fitValues = (1 - fit[2] - fit[3]) * options['sigmoidHandle'](x, fit[0], fit[1]) + fit[3]
plt.plot(x, fitValues, c=lineColor, lw=lineWidth, clip_on=False)
plt.plot(xLow, fitValuesLow, '--', c=lineColor, lw=lineWidth, clip_on=False)
plt.plot(xHigh, fitValuesHigh, '--', c=lineColor, lw=lineWidth, clip_on=False)
# PLOT PARAMETER ILLUSTRATIONS
# THRESHOLD
if plotThresh:
if options['logspace']:
x = [np.exp(fit[0]), np.exp(fit[0])]
else:
x = [fit[0], fit[0]]
y = [ymin, fit[3] + (1 - fit[2] - fit[3]) * options['threshPC']]
plt.plot(x, y, '-', c=lineColor)
# ASYMPTOTES
if plotAsymptote:
plt.plot([min(xLow), max(xHigh)], [1-fit[2], 1-fit[2]], ':', c=lineColor, clip_on=False)
plt.plot([min(xLow), max(xHigh)], [fit[3], fit[3]], ':', c=lineColor, clip_on=False)
# CI-THRESHOLD
if CIthresh:
CIs = result['confIntervals']
y = np.array([fit[3] + .5*(1 - fit[2] - fit[3]) for i in range(2)])
plt.plot(CIs[0,:,0], y, c=lineColor)
plt.plot([CIs[0,0,0], CIs[0,0,0]], y + [-.01, .01], c=lineColor)
plt.plot([CIs[0,1,0], CIs[0,1,0]], y + [-.01, .01], c=lineColor)
#AXIS SETTINGS
plt.axis('tight')
plt.tick_params(labelsize=fontSize)
plt.xlabel(xLabel, fontname=fontName, fontsize=labelSize)
plt.ylabel(yLabel, fontname=fontName, fontsize=labelSize)
if aspectRatio: axisHandle.set_aspect(2/(1 + np.sqrt(5)))
plt.ylim([ymin, 1])
# tried to mimic box('off') in matlab, as box('off') in python works differently
plt.tick_params(direction='out',right='off',top='off')
for side in ['top','right']: axisHandle.spines[side].set_visible(False)
plt.ticklabel_format(style='sci',scilimits=(-2,4))
plt.hold(holdState)
plt.show()
return axisHandle
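# Usage sketch (illustrative only; `res` here stands for a result dictionary
# as returned by the main psignifit routine, which is assumed to exist):
#     ax = plotPsych(res, CIthresh=True)   # fitted curve, data and threshold CI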
def plotsModelfit(result):
"""
Plots some standard plots, meant to help you judge whether there are
systematic deviations from the model. We dropped the statistical tests
here though.
The left plot shows the psychometric function with the data.
The central plot shows the Deviance residuals against the stimulus level.
Systematic deviations from 0 here would indicate that the measured data
shows a different shape than the fitted one.
    The right plot shows the Deviance residuals against "time", i.e. against
    the order of the passed blocks. A trend in this plot would indicate
    learning / changes in performance over time.
These are the same plots as presented in psignifit 2 for this purpose.
"""
fit = result['Fit']
data = result['data']
options = result['options']
minStim = min(data[:,0])
maxStim = max(data[:,0])
stimRange = [1.1*minStim - .1*maxStim, 1.1*maxStim - .1*minStim]
plt.figure(figsize=(15,5))
ax = plt.subplot(1,3,1)
# the psychometric function
x = np.linspace(stimRange[0], stimRange[1], 1000)
y = fit[3] + (1-fit[2]-fit[3]) * options['sigmoidHandle'](x, fit[0], fit[1])
plt.plot(x, y, 'k', clip_on=False)
plt.plot(data[:,0], data[:,1]/data[:,2], '.k', ms=10, clip_on=False)
plt.xlim(stimRange)
if options['expType'] == 'nAFC':
plt.ylim([min(1./options['expN'], min(data[:,1]/data[:,2])), 1])
else:
plt.ylim([0,1])
plt.xlabel('Stimulus Level', fontsize=14)
plt.ylabel('Percent Correct', fontsize=14)
plt.title('Psychometric Function', fontsize=20)
plt.tick_params(right='off',top='off')
for side in ['top','right']: ax.spines[side].set_visible(False)
plt.ticklabel_format(style='sci',scilimits=(-2,4))
ax = plt.subplot(1,3,2)
# stimulus level vs deviance
stdModel = fit[3] + (1-fit[2]-fit[3]) * options['sigmoidHandle'](data[:,0],fit[0],fit[1])
deviance = data[:,1]/data[:,2] - stdModel
stdModel = np.sqrt(stdModel * (1-stdModel))
deviance = deviance / stdModel
xValues = np.linspace(minStim, maxStim, 1000)
plt.plot(data[:,0], deviance, 'k.', ms=10, clip_on=False)
linefit = np.polyfit(data[:,0],deviance,1)
plt.plot(xValues, np.polyval(linefit,xValues),'k-', clip_on=False)
linefit = np.polyfit(data[:,0],deviance,2)
plt.plot(xValues, np.polyval(linefit,xValues),'k--', clip_on=False)
linefit = np.polyfit(data[:,0],deviance,3)
plt.plot(xValues, np.polyval(linefit,xValues),'k:', clip_on=False)
plt.xlabel('Stimulus Level', fontsize=14)
plt.ylabel('Deviance', fontsize=14)
plt.title('Shape Check', fontsize=20)
plt.tick_params(right='off',top='off')
for side in ['top','right']: ax.spines[side].set_visible(False)
plt.ticklabel_format(style='sci',scilimits=(-2,4))
ax = plt.subplot(1,3,3)
# block number vs deviance
blockN = range(len(deviance))
xValues = np.linspace(min(blockN), max(blockN), 1000)
plt.plot(blockN, deviance, 'k.', ms=10, clip_on=False)
linefit = np.polyfit(blockN,deviance,1)
plt.plot(xValues, np.polyval(linefit,xValues),'k-', clip_on=False)
linefit = np.polyfit(blockN,deviance,2)
plt.plot(xValues, np.polyval(linefit,xValues),'k--', clip_on=False)
linefit = np.polyfit(blockN,deviance,3)
plt.plot(xValues, np.polyval(linefit,xValues),'k:', clip_on=False)
plt.xlabel('Block #', fontsize=14)
plt.ylabel('Deviance', fontsize=14)
plt.title('Time Dependence?', fontsize=20)
plt.tick_params(right='off',top='off')
for side in ['top','right']: ax.spines[side].set_visible(False)
plt.ticklabel_format(style='sci',scilimits=(-2,4))
plt.tight_layout()
plt.show()
def plotMarginal(result,
dim = 0,
lineColor = [0, 105/255, 170/255],
lineWidth = 2,
xLabel = '',
yLabel = 'Marginal Density',
labelSize = 15,
tufteAxis = False,
prior = True,
priorColor = [.7, .7, .7],
CIpatch = True,
plotPE = True,
axisHandle = None):
"""
Plots the marginal for a single dimension.
result should be a result struct from the main psignifit routine
dim is the parameter to plot:
        0=threshold, 1=width, 2=lambda, 3=gamma, 4=eta
"""
from .private.utils import strToDim
if isinstance(dim,str): dim = strToDim(dim)
if len(result['marginals'][dim]) <= 1:
print('Error: The parameter you wanted to plot was fixed in the analysis!')
return
if axisHandle == None: axisHandle = plt.gca()
try:
plt.axes(axisHandle)
plt.rc('text', usetex=True)
except TypeError:
raise ValueError('Invalid axes handle provided to plot in.')
if not xLabel:
if dim == 0: xLabel = 'Threshold'
elif dim == 1: xLabel = 'Width'
elif dim == 2: xLabel = r'$\lambda$'
elif dim == 3: xLabel = r'$\gamma$'
elif dim == 4: xLabel = r'$\eta$'
x = result['marginalsX'][dim]
marginal = result['marginals'][dim]
CI = np.hstack(result['conf_Intervals'][dim].T)
Fit = result['Fit'][dim]
holdState = plt.ishold()
if not holdState: plt.cla()
plt.hold(True)
# patch for confidence region
if CIpatch:
xCI = np.array([CI[0], CI[1], CI[1], CI[0]])
xCI = np.insert(xCI, 1, x[np.logical_and(x>=CI[0], x<=CI[1])])
yCI = np.array([np.interp(CI[0], x, marginal), np.interp(CI[1], x, marginal), 0, 0])
yCI = np.insert(yCI, 1, marginal[np.logical_and(x>=CI[0], x<=CI[1])])
from matplotlib.patches import Polygon as patch
color = .5*np.array(lineColor) + .5* np.array([1,1,1])
axisHandle.add_patch(patch(np.array([xCI,yCI]).T, fc=color, ec=color))
# plot prior
if prior:
xprior = np.linspace(min(x), max(x), 1000)
plt.plot(xprior, result['options']['priors'][dim](xprior), '--', c=priorColor, clip_on=False)
# posterior
plt.plot(x, marginal, lw=lineWidth, c=lineColor, clip_on=False)
# point estimate
if plotPE:
plt.plot([Fit,Fit], [0, np.interp(Fit, x, marginal)], 'k', clip_on=False)
plt.xlim([min(x), max(x)])
plt.ylim([0, 1.1*max(marginal)])
plt.xlabel(xLabel, fontsize=labelSize, visible=True)
# if tufteAxis
plt.ylabel(yLabel, fontsize=labelSize, visible=True)
# if tufteAxis
# else:
plt.tick_params(direction='out', right='off', top='off')
for side in ['top','right']: axisHandle.spines[side].set_visible(False)
plt.ticklabel_format(style='sci', scilimits=(-2,4))
plt.hold(holdState)
plt.show()
return axisHandle
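# Usage sketch (illustrative only; `res` is assumed to be a psignifit result
# dictionary containing the 'marginals' and 'marginalsX' entries used above):
#     plotMarginal(res, dim=0)   # posterior marginal of the threshold
#     plotMarginal(res, dim=1)   # posterior marginal of the width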
def getColorMap():
"""
This function returns the standard University of Tuebingen Colormap.
"""
midBlue = np.array([165, 30, 55])/255
lightBlue = np.array([210, 150, 0])/255
steps = 200
MAP = mcolors.LinearSegmentedColormap.from_list('Tuebingen', \
[midBlue, lightBlue, [1,1,1]],N = steps, gamma = 1.0)
cm.register_cmap(name = 'Tuebingen', cmap = MAP)
return MAP
def plotBayes(result, cmap = getColorMap()):
plt.clf()
plt.rc('text', usetex=True)
plt.set_cmap(cmap)
if result['options']['expType'] == 'equalAsymptote':
result['X1D'][3] = 0
for ix in range(0,4):
for jx in range(ix+1,5):
plt.subplot(4,4,4*ix+jx)
#marginalize
marg, _, _ = marginalize(result,np.array([ix,jx]))
e = np.array([result['X1D'][jx][0], result['X1D'][jx][-1], \
result['X1D'][ix][0], result['X1D'][ix][-1] ])
if e[0] == e[1]:
e[0] -= e[0]
e[1] += e[1]
if e[2] == e[3]:
e[2] -= e[2]
e[3] += e[3]
if marg.ndim == 1:
marg = np.reshape(marg, [-1, 1])
if len(result['X1D'][ix]) != 1:
plt.imshow(marg, extent = e, aspect='auto')
else:
plt.imshow(marg.transpose(), extent = e, aspect='auto')
else:
plt.imshow(marg, extent = e, aspect='auto')
# axis labels
if ix == 0:
plt.ylabel('threshold')
elif ix == 1:
plt.ylabel('width')
elif ix == 2:
plt.ylabel(r'$\lambda$')
elif ix == 3:
plt.ylabel(r'$\gamma$')
if jx == 0:
plt.xlabel('threshold')
elif jx == 1:
plt.xlabel('width')
elif jx == 2:
plt.xlabel(r'$\lambda$')
elif jx == 3:
plt.xlabel(r'$\gamma$')
elif jx == 4:
                plt.xlabel(r'$\eta$')
plt.show()
def plotPrior(result,
lineWidth = 2,
lineColor = np.array([0,105,170])/255,
markerSize = 30):
"""
This function creates the plot illustrating the priors on the different
parameters
"""
data = result['data']
if np.size(result['options']['stimulusRange']) <= 1:
result['options']['stimulusRange'] = np.array([min(data[:,0]), max(data[:,0])])
stimRangeSet = False
else:
stimRangeSet = True
stimRange = result['options']['stimulusRange']
r = stimRange[1] - stimRange[0]
# get borders for width
# minimum = minimal difference of two stimulus levels
if len(np.unique(data[:,0])) > 1 and not(stimRangeSet):
widthmin = min(np.diff(np.sort(np.unique(data[:,0]))))
else:
widthmin = 100*np.spacing(stimRange[1])
# maximum = spread of the data
# We use the same prior as we previously used... e.g. we use the factor by
# which they differ for the cumulative normal function
Cfactor = (my_norminv(.95,0,1) - my_norminv(.05,0,1))/ \
(my_norminv(1-result['options']['widthalpha'], 0,1) - \
my_norminv(result['options']['widthalpha'], 0,1))
widthmax = r
steps = 10000
theta = np.empty(5)
for itheta in range(0,5):
if itheta == 0:
x = np.linspace(stimRange[0]-.5*r, stimRange[1]+.5*r, steps)
elif itheta == 1:
x = np.linspace(min(result['X1D'][itheta]), max(result['X1D'][1],),steps)
elif itheta == 2:
x = np.linspace(0,.5,steps)
elif itheta == 3:
x = np.linspace(0,.5,steps)
elif itheta == 4:
x = np.linspace(0,1,steps)
y = result['options']['priors'][itheta](x)
theta[itheta] = np.sum(x*y)/np.sum(y)
if result['options']['expType'] == 'equalAsymptote':
theta[3] = theta[2]
if result['options']['expType'] == 'nAFC':
theta[3] = 1/result['options']['expN']
# get limits for the psychometric function plots
xLimit = [stimRange[0] - .5*r , stimRange[1] +.5*r]
""" threshold """
xthresh = np.linspace(xLimit[0], xLimit[1], steps )
ythresh = result['options']['priors'][0](xthresh)
wthresh = convn(np.diff(xthresh), .5*np.array([1,1]))
cthresh = np.cumsum(ythresh*wthresh)
plt.subplot(2,3,1)
plt.plot(xthresh,ythresh, lw = lineWidth, c= lineColor)
plt.hold(True)
plt.xlim(xLimit)
plt.title('Threshold', fontsize = 18)
plt.ylabel('Density', fontsize = 18)
plt.subplot(2,3,4)
plt.plot(data[:,0], np.zeros(data[:,0].shape), 'k.', ms = markerSize*.75 )
plt.hold(True)
plt.ylabel('Percent Correct', fontsize = 18)
plt.xlim(xLimit)
    x = np.linspace(xLimit[0], xLimit[1], steps)
    for idot in range(0,5):
if idot == 0:
xcurrent = theta[0]
color = 'k'
elif idot == 1:
xcurrent = min(xthresh)
color = [1,200/255,0]
elif idot == 2:
tix = cthresh[cthresh >=.25].size
xcurrent = xthresh[-tix]
color = 'r'
elif idot == 3:
tix = cthresh[cthresh >= .75].size
xcurrent = xthresh[-tix]
color = 'b'
elif idot == 4:
xcurrent = max(xthresh)
color = 'g'
        y = 100*(theta[3]+(1-theta[2]-theta[3])*result['options']['sigmoidHandle'](x,xcurrent, theta[1]))
plt.subplot(2,3,4)
plt.plot(x,y, '-', lw=lineWidth,c=color )
plt.subplot(2,3,1)
plt.plot(xcurrent, result['options']['priors'][0](xcurrent), '.',c=color, ms = markerSize)
""" width"""
xwidth = np.linspace(widthmin, 3/Cfactor*widthmax, steps)
ywidth = result['options']['priors'][1](xwidth)
wwidth = convn(np.diff(xwidth), .5*np.array([1,1]))
cwidth = np.cumsum(ywidth*wwidth)
plt.subplot(2,3,2)
plt.plot(xwidth,ywidth,lw=lineWidth,c=lineColor)
plt.hold(True)
plt.xlim([widthmin,3/Cfactor*widthmax])
plt.title('Width',fontsize=18)
plt.subplot(2,3,5)
plt.plot(data[:,0],np.zeros(data[:,0].size),'k.',ms =markerSize*.75)
plt.hold(True)
plt.xlim(xLimit)
plt.xlabel('Stimulus Level',fontsize=18)
x = np.linspace(xLimit[0],xLimit[1],steps)
for idot in range(0,5):
if idot == 0:
xcurrent = theta[1]
color = 'k'
elif idot == 1:
xcurrent = min(xwidth)
color = [1,200/255,0]
elif idot == 2:
wix = cwidth[cwidth >= .25].size
xcurrent = xwidth[-wix]
color = 'r'
elif idot == 3:
wix = cwidth[cwidth >= .75].size
xcurrent = xwidth[-wix]
color = 'b'
elif idot ==4:
xcurrent = max(xwidth)
color = 'g'
y = 100*(theta[3]+ (1-theta[2] -theta[3])* result['options']['sigmoidHandle'](x,theta[0],xcurrent))
plt.subplot(2,3,5)
plt.plot(x,y,'-',lw = lineWidth, c= color)
plt.subplot(2,3,2)
plt.plot(xcurrent,result['options']['priors'][1](xcurrent),'.',c = color,ms=markerSize)
""" lapse """
xlapse = np.linspace(0,.5,steps)
ylapse = result['options']['priors'][2](xlapse)
wlapse = convn(np.diff(xlapse),.5*np.array([1,1]))
clapse = np.cumsum(ylapse*wlapse)
plt.subplot(2,3,3)
plt.plot(xlapse,ylapse,lw=lineWidth,c=lineColor)
plt.hold(True)
plt.xlim([0,.5])
    plt.title(r'$\lambda$',fontsize=18)
plt.subplot(2,3,6)
plt.plot(data[:,0],np.zeros(data[:,0].size),'k.',ms=markerSize*.75)
plt.hold(True)
plt.xlim(xLimit)
x = np.linspace(xLimit[0],xLimit[1],steps)
for idot in range(0,5):
if idot == 0:
xcurrent = theta[2]
color = 'k'
elif idot == 1:
xcurrent = 0
color = [1,200/255,0]
elif idot == 2:
lix = clapse[clapse >= .25].size
xcurrent = xlapse[-lix]
color = 'r'
elif idot == 3:
lix = clapse[clapse >= .75].size
xcurrent = xlapse[-lix]
color = 'b'
elif idot ==4:
xcurrent = .5
color = 'g'
y = 100*(theta[3]+ (1-xcurrent-theta[3])*result['options']['sigmoidHandle'](x,theta[0],theta[1]))
plt.subplot(2,3,6)
plt.plot(x,y,'-',lw=lineWidth,c=color)
plt.subplot(2,3,3)
plt.plot(xcurrent,result['options']['priors'][2](xcurrent),'.',c=color,ms=markerSize)
# a_handle = plt.gca()
# a_handle.set_position([200,300,1000,600])
# fig, ax = plt.subplots()
#
# for item in [fig, ax]:
# item.patch.set_visible(False)
plt.show()
def plot2D(result,par1,par2,
colorMap = getColorMap(),
labelSize = 15,
fontSize = 10,
axisHandle = None):
"""
This function constructs a 2 dimensional marginal plot of the posterior
density. This is the same plot as it is displayed in plotBayes in an
unmodifyable way.
The result struct is passed as result.
par1 and par2 should code the two parameters to plot:
0 = threshold
1 = width
2 = lambda
3 = gamma
4 = eta
Further plotting options may be passed.
"""
from .private.utils import strToDim
# convert strings to dimension number
par1,label1 = strToDim(str(par1))
par2,label2 = strToDim(str(par2))
assert (par1 != par2), 'par1 and par2 must be different numbers to code for the parameters to plot'
if axisHandle == None:
axisHandle = plt.gca()
try:
plt.axes(axisHandle)
except TypeError:
raise ValueError('Invalid axes handle provided to plot in.')
plt.set_cmap(colorMap)
marg, _, _ = marginalize(result, np.array([par1, par2]))
if par1 > par2 :
marg = marg.T
if 1 in marg.shape:
if len(result['X1D'][par1])==1:
plotMarginal(result,par2)
else:
            plotMarginal(result,par1)
else:
e = [result['X1D'][par2][0], result['X1D'][par2][-1], \
result['X1D'][par1][0], result['X1D'][par1][-1]]
plt.imshow(marg, extent = e)
plt.ylabel(label1,fontsize = labelSize)
plt.xlabel(label2,fontsize = labelSize)
plt.tick_params(direction='out',right='off',top='off')
for side in ['top','right']: axisHandle.spines[side].set_visible(False)
plt.ticklabel_format(style='sci',scilimits=(-2,4))
plt.show()
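# Usage sketch (illustrative only; `res` is assumed to be a psignifit result
# dictionary with the posterior stored on the 'X1D' grids):
#     plot2D(res, 0, 1)   # joint posterior of threshold and width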
if __name__ == "__main__":
result = {}
result['Fit'] = np.array([.004651, .004658, 1.7125E-7, .5, 1.0632E-4])
options = {}
options['expType'] = 'nAFC'
options['expN'] = 2
options['logspace'] = False
options['threshPC'] = .5
from .private.utils import my_normcdf
alpha = .05
C = my_norminv(1-alpha,0,1)-my_norminv(alpha,0,1)
options['sigmoidHandle'] = lambda X,m,width: my_normcdf(X, (m-my_norminv(.5,0,width/C)), width/C)
tmp1 = np.array([10,15,20,25,30,35,40,45,50,60,70,80,100], dtype=float)/10000
tmp2 = np.array([45,50,44,44,52,53,62,64,76,79,88,90,90], dtype=float)
tmp3 = np.array([90 for i in range(len(tmp1))], dtype=float)
data = np.array([tmp1,tmp2,tmp3]).T
result['data'] = data
options['stimulusRange'] = 0
options['widthalpha'] = .05
options['betaPrior'] = 10
options['priors'] = [lambda x: [74.074074196287796 for i in range(len(x))]]
result['options'] = options
CIs = np.zeros((5,2,3))
CIs[:,:,0] = [[.0043,.0050],[.0035,.0060],[.0002,.0219],[.5,.5],[.0013,.1196]]
CIs[:,:,1] = [[.0043,.0049],[.0037,.0058],[.0003,.0181],[.5,.5],[.0026,.1016]]
CIs[:,:,2] = [[.0045,.0048],[.0041,.0053],[.0011,.0112],[.5,.5],[.0083,.0691]]
result['confIntervals'] = CIs
m1 = np.array(
[.0082,.0136,.0229,.0394,.0693,.1252,.2334,.4513,.9106,1.93,4.3147,10.1299,24.5262,
59.3546,138.382,300.3194,590.1429,1.0289E3,1.5691E3,2.0739E3,2.3629E3,2.3158E3,
1.9536E3,1.4237E3,902.2289,502.3969,249.541,112.9197,47.8892,19.7137,8.1762,3.5234,
1.6037,.7722,.3908,.206,.1124,.063,.0362,.0212])
marg = np.empty((5,),dtype=object)
marg[0] = m1
result['marginals'] = marg
m1x = np.array(
[0.003327586206897,
0.003391246684350,
0.003454907161804,
0.003518567639257,
0.003582228116711,
0.003645888594164,
0.003709549071618,
0.003773209549072,
0.003836870026525,
0.003900530503979,
0.003964190981432,
0.004027851458886,
0.004091511936340,
0.004155172413793,
0.004218832891247,
0.004282493368700,
0.004346153846154,
0.004409814323607,
0.004473474801061,
0.004537135278515,
0.004600795755968,
0.004664456233422,
0.004728116710875,
0.004791777188329,
0.004855437665782,
0.004919098143236,
0.004982758620690,
0.005046419098143,
0.005110079575597,
0.005173740053050,
0.005237400530504,
0.005301061007958,
0.005364721485411,
0.005428381962865,
0.005492042440318,
0.005555702917772,
0.005619363395225,
0.005683023872679,
0.005746684350133,
0.005810344827586])
marg = np.empty((5,),dtype=object)
marg[0] = m1x
result['marginalsX'] = marg
#plotPsych(result,CIthresh=True)
plotsModelfit(result)
#plotMarginal(result)
| gpl-3.0 |
rahul-c1/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
pw31/GGchem | tools/kp_compare.py | 1 | 12938 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, ScalarFormatter
plt.rcParams['axes.linewidth'] = 1.5
single_figures = 0
# units are cgs
bar = 1.E+6 # 1 bar in dyn/cm2
atm = 1.013E+6 # 1 atm in dyn/cm2
R = 8.3144598 # J/mol/K
Rcal= 1.987 # cal/mol
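# Conversion note: each fit below is turned into ln kp in cgs units; fits that
# are tabulated for a standard pressure of 1 bar or 1 atm pick up a
# (1-Natom)*ln(p_standard) term, e.g. ln kp[cgs] = -dG/(R*T) + (1-Natom)*ln(1.E+6)
# for the bar-based fits, while the Theta=5040/T polynomial fits need no shift.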
f = open('../data/dispol_BarklemCollet.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close
NBC = int(header.split()[0])
BCname = np.empty(NBC, dtype="S12")
BCdat = np.zeros([NBC,5])
for i in range(0,NBC*2,2):
#print lines[i]
BCname[i/2] = lines[i].split()[0]
tmp = lines[i+1].split()[1:6]
for j in range(0,5):
BCdat[i/2,j] = float(tmp[j])
print "BC species",NBC,BCname
f = open('../data/dispol_GGchem.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close
NGG = int(header)
GGname = np.empty(NGG, dtype="S18")
GGdat = np.zeros([NGG,5])
GGcode = np.zeros(NGG,dtype='int')
for i in range(0,NGG*2,2):
#print lines[i]
GGname[i/2] = lines[i].split()[0]
GGcode[i/2] = int(lines[i+1].split()[0])
dat = lines[i+1].split()[1:6]
for j in range(0,5):
GGdat[i/2,j] = float(dat[j])
print "GG species",NGG,GGname
f = open('../data/dispol_BURCAT.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close
NBU = int(header)
BUname = np.empty(NBU, dtype="S18")
BUdat = np.zeros([NBU,14])
BUcode = np.zeros(NBU,dtype='int')
for i in range(0,NBU*2,2):
#print lines[i]
BUname[i/2] = lines[i].split()[0]
BUcode[i/2] = int(lines[i+1].split()[0])
dat = lines[i+1].split()[1:15]
for j in range(0,14):
BUdat[i/2,j] = float(dat[j])
np.set_printoptions(threshold=3000)
print "BU species",NBU,BUname
f = open('../data/dispol_StockKitzmann.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close
NSK = int(header)
SKname = np.empty(NSK, dtype="S12")
SKdat = np.zeros([NSK,5])
for i in range(0,NSK*2,2):
#print lines[i]
SKname[i/2] = lines[i].split()[0]
dat = lines[i+1].split()[1:6]
for j in range(0,5):
SKdat[i/2,j] = float(dat[j])
print "SK species",NSK,SKname
f = open('../data/dispol_Tsuji.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close
NT = int(header)
Tname = np.empty(NT, dtype="S12")
Tdat = np.zeros([NT,5])
for i in range(0,NT*2,2):
#print lines[i]
Tname[i/2] = lines[i].split()[0]
dat = lines[i+1].split()[1:6]
for j in range(0,5):
Tdat[i/2,j] = float(dat[j])
print "Tsuji species",Tname
f = open('../data/dispol_SharpHuebner.dat','r')
header = f.readline()
lines = f.readlines()[:]
f.close()
NSH = int(header)
SHname = np.empty(NSH, dtype="S12")
SHdat = np.zeros([NSH,5])
SHcode = np.zeros(NSH,dtype='int')
for i in range(0,NSH*2,2):
#print lines[i]
SHname[i/2] = lines[i].split()[0]
dat = lines[i+1].split()[1:6]
  SHcode[i/2] = int(lines[i+1].split()[0])
#print i,SHname[i/2],dat
for j in range(0,5):
SHdat[i/2,j] = float(dat[j])
print "SH species",SHname
print
name = raw_input("which species? ")
Natom = int(raw_input("Natom=? "))
Tmin = 100.0
Tmax = 6500.0
T = np.arange(Tmin,Tmax,1.0)
T2 = np.arange(400.0,Tmax,1.0)
#================== Fig 1 ===========================
if (single_figures==0): pp=PdfPages('kp.pdf')
if (single_figures==1): pp=PdfPages('kp_'+name+'_kp.pdf')
plt.figure(figsize=(4,4))
kpmean = 0*T
Nmean = 0
ln10 = np.log(10)
ind1 = np.where(name.upper()==GGname)[0]
if (len(ind1)>0):
if (GGcode[ind1[0]]==1):
a = GGdat[ind1[0],0:5]
Th = 5040.0/T
lnkp1 = a[0] + a[1]*Th + a[2]*Th**2 + a[3]*Th**3 + a[4]*Th**4
#print "GGchem",GGname[ind1[0]],a
plt.plot(T,lnkp1/ln10,c='black',lw=2.5,label='old GGchem')
print "GGchem",GGname[ind1[0]],a
#kpmean = kpmean+lnkp1
#Nmean = Nmean+1
else:
ind1=''
ind2 = np.where(name==SKname)[0]
if (len(ind2)>0):
a = SKdat[ind2[0],0:5]
dGRT = a[0]/T + a[1]*np.log(T) + a[2] + a[3]*T + a[4]*T**2
lnkp2 = dGRT + (1-Natom)*np.log(bar)
print "S&K",SKname[ind2[0]],a
plt.plot(T,lnkp2/ln10,c='green',lw=3.5,label='Stock')
kpmean = kpmean+lnkp2
Nmean = Nmean+1
ind4 = np.where(name==SHname)[0]
if (len(ind4)>0):
if (SHcode[ind4[0]]==3):
a = SHdat[ind4[0],0:5]
dG = a[0]/T + a[1] + a[2]*T + a[3]*T**2 + a[4]*T**3
lnkp4 = -dG/(Rcal*T) + (1-Natom)*np.log(atm)
print 'S&H',SHname[ind4[0]],a
plt.plot(T,lnkp4/ln10,c='orange',lw=1.5,label='Sharp & Huebner')
#kpmean = kpmean+lnkp4
#Nmean = Nmean+1
else:
ind4 = ''
ind5 = np.where(name==Tname)[0]
if (len(ind5)>0):
a = Tdat[ind5[0],0:5]
Th = 5040.0/T2
lnkp5 = -(a[0] + a[1]*Th + a[2]*Th**2 + a[3]*Th**3 + a[4]*Th**4)*np.log(10)
Th = 5040.0/T
lnkp51= -(a[0] + a[1]*Th + a[2]*Th**2 + a[3]*Th**3 + a[4]*Th**4)*np.log(10)
print 'Tsuji',Tname[ind5[0]],a
plt.plot(T2,lnkp5/ln10,c='magenta',lw=1.5,label='Tsuji')
ind3 = np.where(name==BCname)[0]
if (len(ind3)>0):
a = BCdat[ind3[0],0:5]
dGRT = a[0]/T + a[1]*np.log(T) + a[2] + a[3]*T + a[4]*T**2
lnkp3 = dGRT + (1-Natom)*np.log(bar)
print 'B&C',BCname[ind3[0]],a
plt.plot(T,lnkp3/ln10,c='blue',ls='--',lw=1.5,label='Barklem & Collet')
kpmean = kpmean+lnkp3
Nmean = Nmean+1
ind6 = np.where(name.upper()==BUname)[0]
if (len(ind6)>0):
a = BUdat[ind6[0],0:14]
lnkp6 = 0.0*T
j = 0
for Tg in T:
if (Tg>1000.0):
H_RT = a[0] + a[1]*Tg/2.0 + a[2]*Tg**2/3.0 + a[3]*Tg**3/4.0 + a[4]*Tg**4/5.0 + a[5]/Tg
S_R = a[0]*np.log(Tg) + a[1]*Tg + a[2]*Tg**2/2.0 + a[3]*Tg**3/3.0 + a[4]*Tg**4/4.0 + a[6]
else:
H_RT = a[7] + a[8]*Tg/2.0 + a[9]*Tg**2/3.0 + a[10]*Tg**3/4.0 + a[11]*Tg**4/5.0 + a[12]/Tg
S_R = a[7]*np.log(Tg) + a[8]*Tg + a[9]*Tg**2/2.0 + a[10]*Tg**3/3.0 + a[11]*Tg**4/4.0 + a[13]
dGRT = H_RT - S_R
lnkp6[j] = -dGRT + (1-Natom)*np.log(bar)
j += 1
print "BURCAT",BUname[ind6[0]],a
plt.plot(T,lnkp6/ln10,c='red',ls=':',lw=2.0,label='BURCAT')
kpmean = kpmean+lnkp6
Nmean = Nmean+1
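# Note: the BURCAT evaluation above assumes the NASA 7-term polynomial
# convention, where the coefficients give H/(R*T) and S/R on a high-T
# (>1000 K) and a low-T branch. The dimensionless Gibbs energy of formation
# is dG/(R*T) = H/(R*T) - S/R, and the equilibrium constant follows as
# ln kp = -dG/(R*T) + (1-Natom)*ln(p0), with p0 = 1 bar in cgs (dyn/cm2),
# which is exactly how lnkp6 is filled in the loop above.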
kpmean = kpmean/Nmean
plt.xlabel(r'$T\,\mathrm{[K]}$',fontsize=15)
plt.ylabel(r'$\log_{10} k_p \mathrm{[cgs]}$',fontsize=15)
plt.title(name)
plt.xlim(Tmin,Tmax)
plt.xscale('log')
plt.subplots_adjust(left=0.21, right=0.94, top=0.94, bottom=0.14)
plt.tick_params(axis='both', which='major', length=6,width=1.5)
plt.tick_params(axis='both', which='minor', length=4,width=1)
plt.legend(loc='upper right',fontsize=10)
plt.savefig(pp,format='pdf')
#================== Fig 2 ===========================
if (single_figures==0): plt.clf()
if (single_figures==1): pp.close()
if (single_figures==1): pp=PdfPages('kp_'+name+'_kperr.pdf')
plt.figure(figsize=(4,4))
ymin = 0.0
ymax = 0.0
iT = np.where(T>300)[0]
iT1= np.where(T>600)[0]
iT2= np.where(T>1000)[0]
print iT
if (len(ind1)>0):
plt.plot(T,(lnkp1-kpmean)/ln10,c='black',lw=2.5,label='old GGchem')
ymin = np.min([ymin,np.min(lnkp1[iT]-kpmean[iT])])
ymax = np.max([ymax,np.max(lnkp1[iT]-kpmean[iT])])
print "GG",ymin,ymax
if (len(ind2)>0):
plt.plot(T,(lnkp2-kpmean)/ln10,c='green',lw=3.5,label='Stock')
ymin = np.min([ymin,np.min(lnkp2[iT]-kpmean[iT])])
ymax = np.max([ymax,np.max(lnkp2[iT]-kpmean[iT])])
print "SK",ymin,ymax
if (len(ind4)>0):
plt.plot(T,(lnkp4-kpmean)/ln10,c='orange',lw=1.5,label='Sharp & Huebner')
ymin = np.min([ymin,np.min(lnkp4[iT1]-kpmean[iT1])])
ymax = np.max([ymax,np.max(lnkp4[iT1]-kpmean[iT1])])
print "SH",ymin,ymax
if (len(ind5)>0):
plt.plot(T,(lnkp51-kpmean)/ln10,c='magenta',lw=1.5,label='Tsuji')
ymin = np.min([ymin,np.min(lnkp51[iT2]-kpmean[iT2])])
ymax = np.max([ymax,np.max(lnkp51[iT2]-kpmean[iT2])])
print "Tsu",ymin,ymax
if (len(ind3)>0):
plt.plot(T,(lnkp3-kpmean)/ln10,c='blue',ls='--',lw=1.5,label='Barklem & Collet')
ymin = np.min([ymin,np.min(lnkp3[iT]-kpmean[iT])])
ymax = np.max([ymax,np.max(lnkp3[iT]-kpmean[iT])])
print "BC",ymin,ymax
if (len(ind6)>0):
plt.plot(T,(lnkp6-kpmean)/ln10,c='red',ls=':',lw=2.0,label='BURCAT')
ymin = np.min([ymin,np.min(lnkp6[iT]-kpmean[iT])])
ymax = np.max([ymax,np.max(lnkp6[iT]-kpmean[iT])])
print "BU",ymin,ymax
plt.xlabel(r'$T\,\mathrm{[K]}$',fontsize=15)
plt.ylabel(r'$\log_{10} k_p - \langle\log_{10} k_p\rangle \mathrm{[cgs]}$',fontsize=15)
plt.title(name)
plt.xlim(Tmin,Tmax)
#ymax = ymax+0.5*(ymax-ymin)
#dy = ymax-ymin
#plt.ylim((ymin-dy)/ln10,(ymax+dy)/ln10)
plt.ylim(-4,+4)
plt.xscale('log')
plt.subplots_adjust(left=0.19, right=0.94, top=0.94, bottom=0.14)
plt.tick_params(axis='both', which='major', length=6,width=1.5)
plt.tick_params(axis='both', which='minor', length=4,width=1)
#plt.legend(loc='upper right',fontsize=10)
plt.savefig(pp,format='pdf')
#================== Fig 3 ===========================
if (single_figures==0): plt.clf()
if (single_figures==1): pp.close()
if (single_figures==1): pp=PdfPages('kp_'+name+'_dG.pdf')
Gmean = 0*T
Nmean = 0
fig,ax = plt.subplots(figsize=(4,4))
if (len(ind1)>0):
dG = ((1-Natom)*np.log(bar) - lnkp1)*R*T/1000
#Gmean = Gmean+dG
#Nmean = Nmean+1
plt.plot(T,dG,c='black',lw=2.5,label='old GGchem')
if (len(ind2)>0):
dG = ((1-Natom)*np.log(bar) - lnkp2)*R*T/1000
Gmean = Gmean+dG
Nmean = Nmean+1
plt.plot(T,dG,c='green',lw=3.5,label='Stock')
if (len(ind4)>0):
dG = ((1-Natom)*np.log(bar) - lnkp4)*R*T/1000
#Gmean = Gmean+dG
#Nmean = Nmean+1
plt.plot(T,dG,c='orange',lw=1.5,label='Sharp & Huebner')
if (len(ind5)>0):
dG2= ((1-Natom)*np.log(bar) - lnkp5)*R*T2/1000
#Gmean = Gmean+dG2
#Nmean = Nmean+1
plt.plot(T2,dG2,c='magenta',lw=1.5,label='Tsuji')
if (len(ind3)>0):
dG = ((1-Natom)*np.log(bar) - lnkp3)*R*T/1000
Gmean = Gmean+dG
Nmean = Nmean+1
plt.plot(T,dG,c='blue',ls='--',lw=1.5,label='Barklem & Collet')
if (len(ind6)>0):
dG = ((1-Natom)*np.log(bar) - lnkp6)*R*T/1000
Gmean = Gmean+dG
Nmean = Nmean+1
plt.plot(T,dG,c='red',ls=':',lw=2.0,label='BURCAT')
Gmean = Gmean/Nmean
plt.xlabel(r'$T\,\mathrm{[K]}$',fontsize=15)
plt.ylabel(r'$\Delta G_{\rm f}^\theta \mathrm{[kJ/mol]}$',fontsize=15)
plt.title(name)
plt.subplots_adjust(left=0.22, right=0.94, top=0.94, bottom=0.14)
plt.legend(loc='upper left',fontsize=10)
plt.tick_params(axis='both', which='major', length=6,width=1.5)
plt.tick_params(axis='both', which='minor', length=4,width=1)
plt.xlim(0,Tmax)
minorLocator = MultipleLocator(500.0)
ax.xaxis.set_minor_locator(minorLocator)
#minorLocator = MultipleLocator(50.0)
#ax.yaxis.set_minor_locator(minorLocator)
plt.savefig(pp,format='pdf')
#================== Fig 4 ===========================
if (single_figures==0): plt.clf()
if (single_figures==1): pp.close()
if (single_figures==1): pp=PdfPages('kp_'+name+'_dGerr.pdf')
fig,ax = plt.subplots(figsize=(4,4))
ymin = 0.0
ymax = 0.0
if (len(ind1)>0):
dG = ((1-Natom)*np.log(bar) - lnkp1)*R*T/1000
plt.plot(T,dG-Gmean,c='black',lw=2.5,label='old GGchem')
ymin = np.min([ymin,np.min(dG[iT]-Gmean[iT])])
ymax = np.max([ymax,np.max(dG[iT]-Gmean[iT])])
print "GG",ymin,ymax
if (len(ind2)>0):
dG = ((1-Natom)*np.log(bar) - lnkp2)*R*T/1000
plt.plot(T,dG-Gmean,c='green',lw=3.5,label='Stock')
ymin = np.min([ymin,np.min(dG[iT]-Gmean[iT])])
ymax = np.max([ymax,np.max(dG[iT]-Gmean[iT])])
print "SK",ymin,ymax
if (len(ind4)>0):
dG = ((1-Natom)*np.log(bar) - lnkp4)*R*T/1000
plt.plot(T,dG-Gmean,c='orange',lw=1.5,label='Sharp & Huebner')
ymin = np.min([ymin,np.min(dG[iT1]-Gmean[iT1])])
ymax = np.max([ymax,np.max(dG[iT1]-Gmean[iT1])])
print "SH",ymin,ymax
if (len(ind5)>0):
dG = ((1-Natom)*np.log(bar) - lnkp51)*R*T/1000
plt.plot(T,dG-Gmean,c='magenta',lw=1.5,label='Tsuji')
ymin = np.min([ymin,np.min(dG[iT2]-Gmean[iT2])])
ymax = np.max([ymax,np.max(dG[iT2]-Gmean[iT2])])
print "Tsu",ymin,ymax
if (len(ind3)>0):
dG = ((1-Natom)*np.log(bar) - lnkp3)*R*T/1000
plt.plot(T,dG-Gmean,c='blue',ls='--',lw=1.5,label='Barklem & Collet')
ymin = np.min([ymin,np.min(dG[iT]-Gmean[iT])])
ymax = np.max([ymax,np.max(dG[iT]-Gmean[iT])])
print "BK",ymin,ymax
if (len(ind6)>0):
dG = ((1-Natom)*np.log(bar) - lnkp6)*R*T/1000
plt.plot(T,dG-Gmean,c='red',ls=':',lw=2.0,label='BURCAT')
ymin = np.min([ymin,np.min(dG[iT]-Gmean[iT])])
ymax = np.max([ymax,np.max(dG[iT]-Gmean[iT])])
print "BU",ymin,ymax
plt.xlabel(r'$T\,\mathrm{[K]}$',fontsize=15)
plt.ylabel(r'$\Delta G_{\rm f}^\theta - \langle\Delta G_{\rm f}^\theta\rangle \mathrm{[kJ/mol]}$',fontsize=15)
plt.title(name)
plt.xlim(0,Tmax)
#plt.xscale('log')
#ymax = ymax+0.5*(ymax-ymin)
#dy = ymax-ymin
#plt.ylim(ymin-dy,ymax+dy)
plt.ylim(-30,+30)
plt.subplots_adjust(left=0.19, right=0.94, top=0.94, bottom=0.14)
#plt.legend(loc='upper left',fontsize=10)
plt.tick_params(axis='both', which='major', length=6,width=1.5)
plt.tick_params(axis='both', which='minor', length=4,width=1)
minorLocator = MultipleLocator(500.0)
ax.xaxis.set_minor_locator(minorLocator)
minorLocator = MultipleLocator(5.0)
ax.yaxis.set_minor_locator(minorLocator)
plt.savefig(pp,format='pdf')
pp.close()
print ' '
print 'written output to kp.pdf.'
| gpl-3.0 |
sgiavasis/nipype | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 10 | 7893 | from __future__ import absolute_import
import re
import inspect
import textwrap
import pydoc
import sphinx
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
from nipype.external.six import string_types
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in list(idx.items()):
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], string_types):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str and
'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_Pearsonsdata.py | 1 | 5463 | #!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Program to best fit straight line parameters
# to data given by Pearson, 1901
# Vog, 12 Dec, 2011
#
# The data for x and y are from Pearson
# Pearson, K. 1901. On lines and planes of closest fit to systems
# of points in space. Philosophical Magazine 2:559-572
# Copy of this article can be found at:
# stat.smmu.edu.cn/history/pearson1901.pdf
#
# Pearson's best fit through (3.82,3.70) ->
# a=5.784 b=-0.54556
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from kapteyn import kmpfit
def model(p, x):
# Model: y = a + numpy.tan(theta)*x
a, theta = p
return a + numpy.tan(theta)*x
def residuals(p, data):
# Residuals function for data with errors in both coordinates
a, theta = p
x, y = data
B = numpy.tan(theta)
wi = 1/numpy.sqrt(1.0 + B*B)
d = wi*(y-model(p,x))
return d
def residuals2(p, data):
# Residuals function for data with errors in y only
a, b = p
x, y = data
d = (y-model(p,x))
return d
# Pearsons data
x = numpy.array([0.0, 0.9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
y = numpy.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
N = len(x)
beta0 = [5.0, 0.0] # Initial estimates
# Analytical solutions following Pearson's formulas
print("\nAnalytical solution")
print("===================")
x_av = x.mean()
y_av = y.mean()
sx = (x-x_av)
sy = (y-y_av)
Sx = sx.sum()
Sy = sy.sum()
Sxx = (sx*sx).sum()
Syy = (sy*sy).sum()
Sxy = (sx*sy).sum()
tan2theta = 2*Sxy/(Sxx-Syy)
twotheta = numpy.arctan(tan2theta)
b_pearson = numpy.tan(twotheta/2)
a_pearson = y_av - b_pearson*x_av
print("Best fit parameters: a=%.10f b=%.10f"%(a_pearson,b_pearson))
rxy = Sxy/numpy.sqrt(Sxx*Syy)
print("Pearson's Corr. coef: ", rxy)
tan2theta = 2*rxy*numpy.sqrt(Sxx*Syy)/(Sxx-Syy)
twotheta = numpy.arctan(tan2theta)
print("Pearson's best tan2theta, theta, slope: ", \
tan2theta, 0.5*twotheta, numpy.tan(0.5*twotheta))
b1 = rxy*numpy.sqrt(Syy)/numpy.sqrt(Sxx)
print("b1 (Y on X), slope: ", b1, b1)
b2 = rxy*numpy.sqrt(Sxx)/numpy.sqrt(Syy)
print("b2 (X on Y), slope", b2, 1/b2)
# Prepare fit routine
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y))
fitobj.fit(params0=beta0)
print("\n======== Results kmpfit: effective variance =========")
print("Params: ", fitobj.params[0], numpy.tan(fitobj.params[1]))
print("Covariance errors: ", fitobj.xerror)
print("Standard errors ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
# Prepare fit routine
fitobj2 = kmpfit.Fitter(residuals=residuals2, data=(x, y))
fitobj2.fit(params0=beta0)
print("\n======== Results kmpfit Y on X =========")
print("Params: ", fitobj2.params)
print("Covariance errors: ", fitobj2.xerror)
print("Standard errors ", fitobj2.stderr)
print("Chi^2 min: ", fitobj2.chi2_min)
print("Reduced Chi^2: ", fitobj2.rchi2_min)
a1, b1 = fitobj2.params[0], numpy.tan(fitobj2.params[1])
fitobj3 = kmpfit.Fitter(residuals=residuals2, data=(y, x))
fitobj3.fit(params0=(0,5))
print("\n======== Results kmpfit X on Y =========")
print("Params: ", fitobj3.params)
print("Covariance errors: ", fitobj3.xerror)
print("Standard errors ", fitobj3.stderr)
print("Chi^2 min: ", fitobj3.chi2_min)
print("Reduced Chi^2: ", fitobj3.rchi2_min)
a2, b2 = fitobj3.params[0], numpy.tan(fitobj3.params[1])
A2 = -a2/b2; B2 = 1/b2 # Get values for XY plane
print("\nLeast squares solution")
print("======================")
print("a1, b1 (Y on X)", a1, b1)
print("a2, b2 (X on Y)", A2, B2)
tan2theta = 2*b1*b2/(b2-b1)
twotheta = numpy.arctan(tan2theta)
best_slope = numpy.tan(0.5*twotheta)
print("Best fit tan2theta, Theta, slope: ", tan2theta, \
0.5*twotheta, best_slope)
best_offs = y_av - best_slope*x_av
print("Best fit parameters: a=%.10f b=%.10f"%(best_offs,best_slope))
bislope = (b1*B2-1+numpy.sqrt((1+b1*b1)*(1+B2*B2)))/(b1+B2)
abi = y_av - bislope*x_av
print("Bisector through centroid a, b: ",abi, bislope)
bbi = numpy.arctan(bislope) # Back to angle again
B2_angle = numpy.arctan(B2) # Back to angle again
# Some plotting
rc('font', size=9)
rc('legend', fontsize=7)
fig = figure(1)
d = (x.max() - x.min())/10
for i in [0,1]:
if i == 0:
X = numpy.linspace(x.min()-d, x.max()+d, 50)
frame = fig.add_subplot(2,1,i+1, aspect=1, adjustable='datalim')
else:
X = numpy.linspace(-0.9, -0.3, 50)
frame = fig.add_subplot(2,1,i+1, aspect=1)
frame.plot(x, y, 'oy')
frame.plot(X, model(fitobj.params,X), 'c', ls='--', lw=4, label="kmpfit effective variance")
frame.plot(X, model(fitobj2.params,X), 'g', label="kmpfit regression Y on X")
frame.plot(X, model((A2,B2_angle),X), 'r', label="kmpfit regression X on Y")
frame.plot(X, model((abi,bbi),X), 'y', label="Bisector")
frame.plot(X, model((a_pearson,numpy.arctan(b_pearson)),X), 'm', lw=2, label="Pearson's values")
frame.plot((x_av,),(y_av,), '+k', markersize=14) # Mark the centroid
frame.set_ylabel("Y")
frame.grid(True)
if i == 1:
frame.set_xlabel("X")
frame.set_xlim(-0.9,-0.3)
frame.set_ylim(6,6.38)
else:
frame.set_title("$\mathrm{Pearson's\ data\ and\ model:\ } y = a+b*x$")
leg = frame.legend(loc=1)
show() | bsd-3-clause |
arjoly/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of its points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
andrewowens/camo | src/img.py | 1 | 22096 | import numpy as np
import os, pylab
import itertools as itl
from PIL import Image, ImageDraw, ImageFont
import util as ut
import scipy.misc, scipy.misc.pilutil # not sure if this is necessary
import scipy.ndimage
from StringIO import StringIO
import webbrowser
#import cv
def show(*args, **kwargs):
import imtable
return imtable.show(*args, **kwargs)
# Functional code for drawing on images:
def draw_on(f, im):
pil = to_pil(im)
draw = ImageDraw.ImageDraw(pil)
f(draw)
return from_pil(pil)
def color_from_string(s):
""" todo: add more, see matplotlib.colors.cnames """
colors = {'r' : (255, 0, 0), 'g' : (0, 255, 0), 'b' : (0, 0, 255)}
if s in colors:
return colors[s]
else:
ut.fail('unknown color: %s' % s)
def parse_color(c):
if type(c) == type((0,)) or type(c) == type(np.array([1])):
return c
elif type(c) == type(''):
return color_from_string(c)
def colors_from_input(color_input, default, n):
""" Parse color given as input argument; gives user several options """
# todo: generalize this to non-colors
expanded = None
if color_input is None:
expanded = [default] * n
elif (type(color_input) == type((1,))) and map(type, color_input) == [int, int, int]:
# expand (r, g, b) -> [(r, g, b), (r, g, b), ..]
expanded = [color_input] * n
else:
# general case: [(r1, g1, b1), (r2, g2, b2), ...]
expanded = color_input
expanded = map(parse_color, expanded)
return expanded
def draw_rects(im, rects, outlines = None, fills = None, texts = None, text_colors = None, line_widths = None, as_oval = False):
rects = list(rects)
outlines = colors_from_input(outlines, (0, 0, 255), len(rects))
text_colors = colors_from_input(text_colors, (255, 255, 255), len(rects))
fills = colors_from_input(fills, None, len(rects))
if texts is None: texts = [None] * len(rects)
if line_widths is None: line_widths = [None] * len(rects)
def check_size(x, s): ut.check(x is None or len(x) == len(rects), "%s different size from rects" % s)
check_size(outlines, 'outlines')
check_size(fills, 'fills')
check_size(texts, 'texts')
check_size(text_colors, 'texts')
def f(draw):
for (x, y, w, h), outline, fill, text, text_color, lw in itl.izip(rects, outlines, fills, texts, text_colors, line_widths):
if lw is None:
if as_oval:
draw.ellipse((x, y, x + w, y + h), outline = outline, fill = fill)
else:
draw.rectangle((x, y, x + w, y + h), outline = outline, fill = fill)
else:
# TODO: to do this right, we need to find where PIL draws the corners
# x -= lw
# y -= lw
# w += 2*lw
# h += 2*lw
# pts = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]
# for i in xrange(len(pts)):
# #draw.line(pts[i] + pts[(i+1)%4], fill = outline, width = lw)
# draw.rectangle(pts[i] + pts[(i+1)%4], fill = outline, width = lw)
d = int(np.ceil(lw/2))
draw.rectangle((x-d, y-d, x+w+d, y+d), fill = outline)
draw.rectangle((x-d, y-d, x+d, y+h+d), fill = outline)
draw.rectangle((x+w+d, y+h+d, x-d, y+h-d), fill = outline)
draw.rectangle((x+w+d, y+h+d, x+w-d, y-d), fill = outline)
if text is not None:
# draw text inside rectangle outline
border_width = 2
draw.text((border_width + x, y), text, fill = text_color)
return draw_on(f, im)
def draw_rects_scale(sc, im, rects, outlines = None, fills = None, texts = None, text_colors = None):
scaled_rects = []
for r in rects:
r = np.array(r)
sr = r * sc
if r[2] >= 1 and r[3] >= 1:
sr[2:] = np.maximum(sr[2:], 1.)
scaled_rects.append(sr)
return draw_rects(scale(im, sc), scaled_rects, outlines, fills, texts, text_colors)
def draw_pts(im, points, colors = None, width = 1, texts = None):
#ut.check(colors is None or len(colors) == len(points))
points = list(points)
colors = colors_from_input(colors, (255, 0, 0), len(points))
rects = [(p[0] - width/2, p[1] - width/2, width, width) for p in points]
return draw_rects(im, rects, fills = colors, outlines = [None]*len(points), texts = texts)
def draw_lines(im, pts1, pts2, colors = None, width = 0):
ut.check(len(pts1) == len(pts2), 'Line endpoints different sizes')
colors = colors_from_input(colors, None, len(pts1))
def f(draw):
for p1, p2, c in itl.izip(pts1, pts2, colors):
draw.line(ut.int_tuple(p1) + ut.int_tuple(p2), fill = c, width = width)
return draw_on(f, im)
def draw_text(im, texts, pts, colors, font_size = None):
im = rgb_from_gray(im)
# todo: add fonts, call from draw_rects
ut.check(len(pts) == len(texts))
#ut.check((colors is None) or len(colors) == len(texts))
colors = colors_from_input(colors, (0, 0, 0), len(texts))
def f(draw):
if font_size is None:
font = None
else:
#font_name = '/usr/share/fonts/truetype/ttf-liberation/LiberationMono-Regular.ttf'
font_name = '/usr/share/fonts/truetype/freefont/FreeMono.ttf'
if not os.path.exists(font_name):
raise RuntimeError('need to change hard-coded font path to make this work on other machines')
font = ImageFont.truetype(font_name, size = font_size)
for pt, text, color in itl.izip(pts, texts, colors):
draw.text(ut.int_tuple(pt), text, fill = color, font = font)
return draw_on(f, im)
def draw_text_ul(im, text, color = (0, 255, 0), font_size = 25):
return draw_text(im, [text], [(0, 0)], [color], font_size = font_size)
def luminance(im):
if len(im.shape) == 2:
return im
else:
# see http://www.mathworks.com/help/toolbox/images/ref/rgb2gray.html
return np.uint8(np.round(0.2989 * im[:,:,0] + 0.587 * im[:,:,1] + 0.114 * im[:,:,2]))
#def sub_img(im, x_or_rect, y = None, w = None, h = None):
def sub_img(im, x_or_rect, y = None, w = None, h = None):
if x_or_rect is None:
return im
elif y is None:
x, y, w, h = x_or_rect
else:
x = x_or_rect
return im[y : y + h, x : x + w]
def sub_img_frac(im, x_or_rect, y = None, w = None, h = None):
if y is None:
x, y, w, h = x_or_rect
else:
x = x_or_rect
x = int(x*im.shape[1])
y = int(y*im.shape[0])
w = int(w*im.shape[1])
h = int(h*im.shape[0])
return im[y : y + h, x : x + w]
# def stack_img_pair(im1, im2):
# h1, w1 = im1.shape[:2]
# h2, w2 = im2.shape[:2]
# im3 = np.zeros((max(h1, h2), w1 + w2, 3), dtype = im1.dtype)
# im3[:h1, :w1, :] = rgb_from_gray(im1)
# im3[:h2, w1:, :] = rgb_from_gray(im2)
# return im3
# def stack_imgs(ims):
# """ slow, should rewrite """
# assert len(ims) > 0
# res = ims[0]
# for im in ims[1:]:
# res = stack_img_pair(res, im)
# return res
# def hstack_ims(ims):
# max_h = max(im.shape[0] for im in ims)
# result = []
# for im in ims:
# frame = np.zeros((max_h, im.shape[1], 3))
# frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
# result.append(frame)
# return np.hstack(result)
def hstack_ims(ims, bg_color = (0, 0, 0)):
max_h = max([im.shape[0] for im in ims])
result = []
for im in ims:
#frame = np.zeros((max_h, im.shape[1], 3))
frame = make(im.shape[1], max_h, bg_color)
frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
result.append(frame)
return np.hstack(result)
# def hstack_ims_mult(*all_ims):
# max_h = max(max(im.shape[0] for im in ims) for ims in all_ims)
# result = []
# for im in ims:
# frame = np.zeros((max_h, im.shape[1], 3))
# frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
# result.append(frame)
# return np.hstack(result)
def vstack_ims(ims, bg_color = (0, 0, 0)):
if len(ims) == 0:
return make(0, 0)
max_w = max([im.shape[1] for im in ims])
result = []
for im in ims:
#frame = np.zeros((im.shape[0], max_w, 3))
frame = make(max_w, im.shape[0], bg_color)
frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
result.append(frame)
return np.vstack(result)
def rgb_from_gray(img):
if img.ndim == 3:
return img.copy()
else:
return np.tile(img[:,:,np.newaxis], (1,1,3))
def load(im_fname, gray = False):
if im_fname.endswith('.gif'):
print "GIFs don't load correctly for some reason"
ut.fail('fail')
im = from_pil(Image.open(im_fname))
# use imread, then flip upside down
#im = np.array(list(reversed(pylab.imread(im_fname)[:,:,:3])))
if gray:
return luminance(im)
elif not gray and np.ndim(im) == 2:
return rgb_from_gray(im)
else:
return im
imread = load
def loadsc(fname, scale, gray = False):
return resize(load(fname, gray = gray), scale)
def save(img_fname, a):
if img_fname.endswith('jpg'):
return Image.fromarray(np.uint8(a)).save(img_fname, quality = 100)
else:
#return Image.fromarray(np.uint8(a)).save(img_fname)
return Image.fromarray(np.uint8(a)).save(img_fname, quality = 100)
# def make_temp_file(ext):
# fd, fname = tempfile.mkstemp(ext)
# # shouldn't delete file
# os.close(fd)
# return fname
# def make_pretty(img):
# if img.dtype == 'bool':
# return img * 255
# elif (0 <= np.min(img)) and (np.max(img) <= 1.0):
# return img*255
# return img
def show_html(html):
page = ut.make_temp('.html')
ut.make_file(page, html)
print 'opening', page
webbrowser.open(page)
# # http://opencv.willowgarage.com/wiki/PythonInterface
# def cv2array(im):
# depth2dtype = {
# cv.IPL_DEPTH_8U: 'uint8',
# cv.IPL_DEPTH_8S: 'int8',
# cv.IPL_DEPTH_16U: 'uint16',
# cv.IPL_DEPTH_16S: 'int16',
# cv.IPL_DEPTH_32S: 'int32',
# cv.IPL_DEPTH_32F: 'float32',
# cv.IPL_DEPTH_64F: 'float64',
# }
# arrdtype=im.depth
# a = np.fromstring(
# im.tostring(),
# dtype=depth2dtype[im.depth],
# count=im.width*im.height*im.nChannels)
# a.shape = (im.height,im.width,im.nChannels)
# return a
# def to_cv(a):
# dtype2depth = {
# 'uint8': cv.IPL_DEPTH_8U,
# 'int8': cv.IPL_DEPTH_8S,
# 'uint16': cv.IPL_DEPTH_16U,
# 'int16': cv.IPL_DEPTH_16S,
# 'int32': cv.IPL_DEPTH_32S,
# 'float32': cv.IPL_DEPTH_32F,
# 'float64': cv.IPL_DEPTH_64F,
# }
# try:
# nChannels = a.shape[2]
# except:
# nChannels = 1
# cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
# dtype2depth[str(a.dtype)],
# nChannels)
# cv.SetData(cv_im, a.tostring(),
# a.dtype.itemsize*nChannels*a.shape[1])
# return cv_im
def to_pil(im): return Image.fromarray(np.uint8(im))
def from_pil(pil): return np.array(pil)
def to_pylab(a): return np.uint8(a)
def test_draw_text():
im = 255 + np.zeros((300, 300, 3))
show([draw_text(im, ['hello', 'world'], [(100, 200), (0, 0)], [(255, 0, 0), (0, 255, 0)]),
draw_text(im, ['hello', 'world'], [(100, 100), (0, 0)], [(255, 0, 0), (0, 255, 0)], font_size = 12)])
def save_tmp(im, encoding = '.png', dir = None):
fname = ut.make_temp(encoding, dir = dir)
save(fname, im)
return fname
def save_tmp_nfs(im, encoding = '.png'):
return save_tmp(im, encoding, '/csail/vision-billf5/aho/tmp')
# def resize(im, size):
# if type(size) == type(1):
# size = float(size)
# #return scipy.misc.pilutil.imresize(im, size)
# return scipy.misc.imresize(im, size)
def resize(im, scale, order = 3, hires = True):
if np.ndim(scale) == 0:
new_scale = [scale, scale]
# interpret scale as dimensions; convert integer size to a fractional scale
elif ((scale[0] is None) or type(scale[0]) == type(0)) \
and ((scale[1] is None) or type(scale[1]) == type(0)) \
and (not (scale[0] is None and scale[1] is None)):
# if the size of only one dimension is provided, scale the other to maintain the right aspect ratio
if scale[0] is None:
dims = (int(float(im.shape[0])/im.shape[1]*scale[1]), scale[1])
elif scale[1] is None:
dims = (scale[0], int(float(im.shape[1])/im.shape[0]*scale[0]))
else:
dims = scale[:2]
new_scale = [float(dims[0] + 0.4)/im.shape[0], float(dims[1] + 0.4)/im.shape[1]]
# a test to make sure we set the floating point scale correctly
result_dims = [int(new_scale[0]*im.shape[0]), int(new_scale[1]*im.shape[1])]
assert tuple(result_dims) == tuple(dims)
elif type(scale[0]) == type(0.) and type(scale[1]) == type(0.):
new_scale = scale
#new_scale = scale[1], scale[0]
else:
raise RuntimeError("don't know how to interpret scale: %s" % (scale,))
# want new scale' to be such that
# int(scale'[0]*im.shape[0]) = scale[0], etc. (that's how zoom computes the new shape)
# todo: any more numerical issues?
#print 'scale before', im.shape, scale
# print 'scale after', scale
# print 'new image size', [int(scale[0]*im.shape[0]),int(scale[1]*im.shape[1])]
#scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
if hires:
#sz = map(int, (scale_param*im.shape[1], scale_param*im.shape[0]))
sz = map(int, (scale_param[1]*im.shape[1], scale_param[0]*im.shape[0]))
return from_pil(to_pil(im).resize(sz, Image.ANTIALIAS))
else:
res = scipy.ndimage.zoom(im, scale_param, order = order)
# verify that zoom() returned an image of the desired size
if (np.ndim(scale) != 0) and type(scale[0]) == type(0) and type(scale[1]) == type(0):
assert res.shape[:2] == (scale[0], scale[1])
return res
# import skimage
# resize = skimage.imresize
def test_resize():
im = make(44, 44)
assert resize(im, (121, 120, 't')).shape[:2] == (121, 120)
assert resize(im, (2., 0.5, 't')).shape[:2] == (88, 22)
def show_file(fname):
show(load(fname))
def img_extensions():
return ['png', 'gif', 'jpg', 'jpeg', 'bmp', 'ppm', 'pgm']
def is_img_file(fname):
return any(fname.lower().endswith(ext) for ext in img_extensions())
def blur(im, sigma):
if np.ndim(im) == 2:
return scipy.ndimage.filters.gaussian_filter(im, sigma)
else:
return np.concatenate([scipy.ndimage.filters.gaussian_filter(im[:, :, i], sigma)[:, :, np.newaxis] for i in xrange(im.shape[2])], axis = 2)
def blit(src, dst, x, y, opt = None):
if opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
dst[dy : dy + dh, dx : dx + dw] = src[sy : sy + dh, sx : sx + dw]
def weighted_add(src, dst, x, y, src_weight, dst_weight, opt = None):
if opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
dst[dy : dy + dh, dx : dx + dw] = dst[dy : dy + dh, dx : dx + dw]*dst_weight + src[sy : sy + dh, sx : sx + dw]*src_weight
def make(w, h, fill = (0,0,0)):
return np.uint8(np.tile([[fill]], (h, w, 1)))
def luminance_rgb(im): return rgb_from_gray(luminance(im))
def rotate(img, angle, fill = 0):
""" Rotate image around its center by the given angle (in
radians). No interpolation is used; indices are rounded. The
returned image may be larger than the original, but the middle
pixel corresponds to the middle of the original. Pixels with no
correspondence are filled as 'fill'.
Also returns mapping from original image to rotated. """
r = int(np.ceil(np.sqrt(img.shape[0]**2 + img.shape[1]**2)))
X, Y = np.mgrid[0:r, 0:r]
X = X.flatten()
Y = Y.flatten()
X2 = np.array(np.round(img.shape[1]/2 + np.cos(angle) * (X - r/2) - np.sin(angle) * (Y - r/2)), dtype = int)
Y2 = np.array(np.round(img.shape[0]/2 + np.sin(angle) * (X - r/2) + np.cos(angle) * (Y - r/2)), dtype = int)
good = ut.logical_and_many(X2 >= 0, X2 < img.shape[1], Y2 >= 0, Y2 < img.shape[0])
out = fill + np.zeros((r, r) if img.ndim == 2 else (r, r, img.shape[2]), dtype = img.dtype)
out[Y[good], X[good]] = img[Y2[good], X2[good]]
T = np.dot(np.dot(ut.rigid_transform(np.eye(2), [img.shape[1]/2, img.shape[0]/2]),
ut.rigid_transform(ut.rotation_matrix2(angle))),
ut.rigid_transform(np.eye(2), [-r/2, -r/2]))
return out, np.linalg.inv(T)
def map_img(f, im, dtype = None, components = None):
new_im = np.zeros(im.shape if components is None else im.shape + (components,), \
dtype = im.dtype if dtype is None else dtype)
for y in xrange(im.shape[0]):
for x in xrange(im.shape[1]):
new_im[y,x] = f(im[y,x])
return new_im
def add_border(img, w, h, color = (0, 0, 0)):
assert 0 <= w
assert 0 <= h
out = make(img.shape[1] + 2*w, img.shape[0] + 2*h, color)
out[h:(h + img.shape[0]), w : (w + img.shape[1])] = img
return out
def pad_corner(im, pw, ph, color = (0, 0, 0)):
out = make(im.shape[1] + pw, im.shape[0] + ph, color)
out[:im.shape[0], :im.shape[1]] = im
return out
def expand(im, new_shape, opt = 'center'):
if type(new_shape) == type(0.):
new_w = int(im.shape[1]*new_shape)
new_h = int(im.shape[0]*new_shape)
elif type(new_shape) == type((1,)):
new_shape = new_shape[:2]
new_h, new_w = new_shape
else:
raise RuntimeError("Don't know how to interpret shape")
if im.shape[0] >= new_h and im.shape[1] >= new_w:
return im.copy()
else:
im = rgb_from_gray(im)
r = make(new_w, new_h)
if opt == 'center':
blit(im, r, im.shape[1]/2, im.shape[0]/2, opt = 'center')
elif opt == 'corner':
r[:im.shape[0], :im.shape[1]] = im
return r
def combine_rgb(r, g, b):
a = np.zeros(r.shape + (3,))
a[:,:,0] = r
a[:,:,1] = g
a[:,:,2] = b
return a
def compute_pyramid(ptm, interval, min_size):
# based on pff's featpyramid.m
# todo: upsample one level
sc = 2**(1.0/interval)
  imsize = ptm.shape[:2]
max_scale = int(1 + np.floor(np.log(np.min(imsize)/min_size)/np.log(sc)))
ims = [None]*max_scale
scale = [None]*len(ims)
# skipping 2x scale
for i in xrange(1, interval+1):
im_scaled = resize(ptm, 1/sc**(i-1))
ims[-1 + i] = im_scaled
scale[-1 + i] = 1/sc**(i-1)
for j in xrange(i+interval, max_scale+1, interval):
im_scaled = resize(im_scaled, 0.5)
ims[-1 + j] = im_scaled
scale[-1 + j] = 0.5*scale[-1 + j - interval]
assert None not in ims
return ims, scale
#imrotate = scipy.misc.imrotate
def imrotate(*args):
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return scipy.misc.imrotate(*args)
def from_fig(fig=None):
if fig is None:
fig = pylab.gcf()
IO = StringIO()
pylab.savefig(IO, format = 'png')
IO.seek(0)
return from_pil(Image.open(IO))
def show_fig():
show(from_fig())
def scale_vals(A, lo, hi):
return np.uint8(255*(np.clip(A, lo, hi) - lo) / float(hi - lo))
def merge_ims(srcs, pts_or_rects, bg, opt = None):
""" Makes a new image where each image in patches is copied at a
corresponding pixel location. Overlapping images are averaged
together. """
dst = bg.copy()
layer = np.zeros(dst.shape)
counts = np.zeros(dst.shape[:2], 'l')
for src, r in itl.izip(srcs, pts_or_rects):
r = ut.int_tuple(r)
x, y = r[:2]
# rescale if we're given a rectangle, and it has a different size
if len(r) > 2:
assert len(r) == 4
assert opt != 'center'
if src.shape[:2] != (r[3], r[2]):
src = resize(src, (r[3], r[2]))
elif opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
layer[dy : dy + dh, dx : dx + dw] += src[sy : sy + dh, sx : sx + dw]
counts[dy : dy + dh, dx : dx + dw] += 1
dst[counts > 0] = layer[counts > 0] / counts[counts > 0][:, np.newaxis]
return dst
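# Illustrative usage sketch (only uses helpers defined in this module; the
# shapes and values are made up): paste two small patches onto a black canvas,
# averaging where they overlap.
#
# bg = make(100, 100)
# patches = [255 + np.zeros((32, 32, 3)), 128 + np.zeros((32, 32, 3))]
# merged = merge_ims(patches, [(10, 10), (30, 30)], bg)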
def label_im(im, text, color = (0, 255, 0)):
return draw_text(im, [text], [(25, im.shape[0] - 25)], [color])
def remap_color(im, xy):
assert im.shape[:2] == xy.shape[:2]
assert xy.shape[2] == 2
vals = []
for i in xrange(im.shape[2]):
dx = xy[..., 0].flatten()[np.newaxis, :]
dy = xy[..., 1].flatten()[np.newaxis, :]
v = scipy.ndimage.map_coordinates(im[..., i], np.concatenate([dy, dx]))
vals.append(v.reshape(im.shape[:2] + (1,)))
return np.concatenate(vals, axis = 2)
def stack_meshgrid(xs, ys, dtype = 'l'):
x, y = np.meshgrid(xs, ys)
return np.array(np.concatenate([x[..., np.newaxis], y[..., np.newaxis]], axis = 2), dtype = dtype)
def sub_img_pad(im, (x, y, w, h), oob = 0):
if len(im.shape) == 2:
dst = np.zeros((h, w))
else:
dst = np.zeros((h, w, im.shape[2]))
dst[:] = oob
sx, sy, sw, sh = ut.crop_rect_to_img((x, y, w, h), im)
dst[(sy - y) : (sy - y) + sh,
(sx - x) : (sx - x) + sw] = im[sy : sy + sh, sx : sx + sw]
return dst
def compress(im, format = 'png'):
out = StringIO()
im = to_pil(im)
im.save(out, format = format)
c = out.getvalue()
out.close()
return c
def uncompress(s):
return from_pil(Image.open(StringIO(s)))
def test_compress():
im = load('/afs/csail.mit.edu/u/a/aho/bear.jpg')
print 'orig', ut.guess_bytes(im)
s = compress(im)
print 'comp', ut.guess_bytes(s)
assert(np.all(im == uncompress(s)))
def mix_ims(im1, im2, mask, alpha = 0.5):
im1 = im1.copy()
im2 = np.asarray(im2)
if len(im2) == 3:
# single color
im1[mask] = im1[mask]*alpha + im2*(1-alpha)
else:
im1[mask] = im1[mask]*alpha + im2[mask]*(1-alpha)
return im1
#def lookup_bilinear(im, x, y, order = 3, mode = 'constant', cval = 0.0):
def lookup_bilinear(im, x, y, order = 1, mode = 'constant', cval = 0.0):
yx = np.array([y, x])
if np.ndim(im) == 2:
return scipy.ndimage.map_coordinates(im, yx, order = order, mode = mode, cval = cval)
else:
return np.concatenate([scipy.ndimage.map_coordinates(im[:, :, i], yx, order = order, mode = mode)[:, np.newaxis] for i in xrange(im.shape[2])], axis = 1)
#def pixels_in_bounds(im, xs, ys):
def pixels_in_bounds(im_shape, xs, ys):
return ut.land(0 <= xs, xs < im_shape[1],
0 <= ys, ys < im_shape[0])
| mit |
simon-pepin/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
maplion/SPEED | Mod13_Predator_Prey.py | 1 | 3662 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SPEED: Module 13: Predator and Prey calculations using Lotka–Volterra equations
GitHub repository: https://github.com/maplion/SPEED
@author: Ryan Dammrose aka MapLion
"""
from matplotlib.pylab import *
from testcalculations import time
import sys
# import timeit
import speedcalc
import speedloader
# import speedcli
__author__ = "Ryan Dammrose"
__copyright__ = "Copyright 2015"
__license__ = "MIT"
sl_dc = speedloader.CSV()
# s_cli = speedcli.SpeedCLI(description="SPEED")
# total_time = timeit.timeit('[v for v in range(10000)]', number=10000)
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def plotResults(N1, N2, N1_initial, N2_initial, totalTime, timeSteps, dataPoints):
"""
:return:
"""
# Plot results for Predator and Prey
figure(num=None, figsize=(15, 8), dpi=80, facecolor='w', edgecolor='k')
hold()
plot(dataPoints, N1, 'r-', dataPoints, N2, 'b-')
title('Predator and Prey Relationship\n'
'Total Time: {0}, Time Steps: {1}, Initial Prey: {2}, Initial Predator: {3}\n'.format(totalTime,
timeSteps, N1_initial, N2_initial))
xlabel('Time Units')
ylabel('Population')
legend(['Prey', 'Predator'])
show()
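# Note: the classical Lotka-Volterra system assumed here (the integration
# itself lives in speedcalc.PredatorPrey, which is not shown in this file) is
#
#     dN1/dt = alpha*N1 - beta*N1*N2     (prey)
#     dN2/dt = delta*N1*N2 - gamma*N2    (predator)
#
# where N1 is the prey and N2 the predator population; alpha, beta, gamma and
# delta correspond to the prey_alpha, prey_beta, predator_gamma and
# predator_delta values set in main() below.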
def main(argv=None):
"""
This is the main function for Module 13
@param argv: incoming arguments
@return: void
"""
start_time = time.clock()
# Declare local main Variables
# if argv is None:
# argv = sys.argv
try:
# arguments = s_cli.arg_parse(argv)
# get File
# if arguments.file is None:
# sys.exit("No file name given.")
# if arguments.file2 is None:
# sys.exit("No file name given for second file")
# else:
# filename = arguments.inputFilePath + "/" + arguments.file
# if ".csv" not in filename:
# filename += ".csv"
# filename2 = arguments.inputFilePath + "/" + arguments.file2
# if ".csv" not in filename2:
# filename2 += ".csv"
# Set Birth Rate and Death Rate variables
prey_alpha = 0.1
prey_beta = 0.02
predator_gamma = 0.4
predator_delta = 0.02
sc_pp = speedcalc.PredatorPrey(prey_alpha, prey_beta, predator_gamma, predator_delta)
# sc_pp_2 = speedcalc.PredatorPrey(prey_alpha, prey_beta, predator_gamma, predator_delta)
# Set Initial Population, time and time step variables
N1_initial_1 = 10
N2_initial_1 = 10
totalTime_1 = 200
timeSteps_1 = 1000
N1_1, N2_1, dataPoints_1 = sc_pp.lotka_volterra(N1_initial_1, N2_initial_1, totalTime_1, timeSteps_1)
# Set Different Initial Population, time and time step variables
N1_initial_2 = 50
N2_initial_2 = 30
totalTime_2 = 300
timeSteps_2 = 1000
N1_2, N2_2, dataPoints_2 = sc_pp.lotka_volterra(N1_initial_2, N2_initial_2, totalTime_2, timeSteps_2)
plotResults(N1_1, N2_1, N1_initial_1, N2_initial_1, totalTime_1, timeSteps_1, dataPoints_1)
plotResults(N1_2, N2_2, N1_initial_2, N2_initial_2, totalTime_2, timeSteps_2, dataPoints_2)
# Benchmarking
# print "Single Execution Time: {0} seconds".format(round(execution_time, 6))
# print "Total wall-clock time to execute the statement 10000 times: {0}".format(total_time)
# print "Average time per loop: {0}".format(total_time/10000)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if __name__ == "__main__":
sys.exit(main())
| mit |
pyrocko/pyrocko | src/orthodrome.py | 1 | 42455 | # http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
from __future__ import division, absolute_import
import math
import numpy as num
from .moment_tensor import euler_to_matrix
from .config import config
from .plot.beachball import spoly_cut
from matplotlib.path import Path
d2r = math.pi/180.
r2d = 1./d2r
earth_oblateness = 1./298.257223563
earthradius_equator = 6378.14 * 1000.
earthradius = config().earthradius
d2m = earthradius_equator*math.pi/180.
m2d = 1./d2m
_testpath = Path([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)], closed=True)
raise_if_slow_path_contains_points = False
class Slow(Exception):
pass
if hasattr(_testpath, 'contains_points') and num.all(
_testpath.contains_points([(0.5, 0.5), (1.5, 0.5)]) == [True, False]):
def path_contains_points(verts, points):
p = Path(verts, closed=True)
return p.contains_points(points).astype(num.bool)
else:
# work around missing contains_points and bug in matplotlib ~ v1.2.0
def path_contains_points(verts, points):
if raise_if_slow_path_contains_points:
# used by unit test to skip slow gshhg_example.py
raise Slow()
p = Path(verts, closed=True)
result = num.zeros(points.shape[0], dtype=num.bool)
for i in range(result.size):
result[i] = p.contains_point(points[i, :])
return result
try:
cbrt = num.cbrt
except AttributeError:
def cbrt(x):
return x**(1./3.)
def float_array_broadcast(*args):
return num.broadcast_arrays(*[
num.asarray(x, dtype=num.float) for x in args])
class Loc(object):
'''Simple location representation
:attrib lat: Latitude degree
:attrib lon: Longitude degree
'''
def __init__(self, lat, lon):
self.lat = lat
self.lon = lon
def clip(x, mi, ma):
''' Clipping data array ``x``
    :param x: Continuous data to be clipped
:param mi: Clip minimum
:param ma: Clip maximum
:type x: :py:class:`numpy.ndarray`
:type mi: float
:type ma: float
:return: Clipped data
:rtype: :py:class:`numpy.ndarray`
'''
return num.minimum(num.maximum(mi, x), ma)
def wrap(x, mi, ma):
'''Wrapping continuous data to fundamental phase values.
.. math::
x_{\\mathrm{wrapped}} = x_{\\mathrm{cont},i} -
\\frac{ x_{\\mathrm{cont},i} - r_{\\mathrm{min}} }
{ r_{\\mathrm{max}} - r_{\\mathrm{min}}}
\\cdot ( r_{\\mathrm{max}} - r_{\\mathrm{min}}),\\quad
x_{\\mathrm{wrapped}}\\; \\in
\\;[ r_{\\mathrm{min}},\\, r_{\\mathrm{max}}].
    :param x: Continuous data to be wrapped
:param mi: Minimum value of wrapped data
:param ma: Maximum value of wrapped data
:type x: :py:class:`numpy.ndarray`
:type mi: float
:type ma: float
:return: Wrapped data
:rtype: :py:class:`numpy.ndarray`
'''
return x - num.floor((x-mi)/(ma-mi)) * (ma-mi)
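# Illustrative example (values chosen for this sketch): wrapping angles into
# the interval [-180., 180.) with the function above.
#
# wrap(num.array([-190., 170., 370.]), -180., 180.)
# # -> array([170., 170., 10.])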
def _latlon_pair(args):
if len(args) == 2:
a, b = args
return a.lat, a.lon, b.lat, b.lon
elif len(args) == 4:
return args
def cosdelta(*args):
'''Cosine of the angular distance between two points ``a`` and ``b`` on
a sphere.
This function (find implementation below) returns the cosine of the
distance angle 'delta' between two points ``a`` and ``b``, coordinates of
which are expected to be given in geographical coordinates and in degrees.
For numerical stability a maximum of 1.0 is enforced.
.. math::
A_{\\mathrm{lat'}} = \\frac{ \\pi}{180} \\cdot A_{lat}, \\quad
A_{\\mathrm{lon'}} = \\frac{ \\pi}{180} \\cdot A_{lon}, \\quad
B_{\\mathrm{lat'}} = \\frac{ \\pi}{180} \\cdot B_{lat}, \\quad
B_{\\mathrm{lon'}} = \\frac{ \\pi}{180} \\cdot B_{lon}\\\\[0.5cm]
        \\cos(\\Delta) = \\min( 1.0, \\quad \\sin( A_{\\mathrm{lat'}})
            \\sin( B_{\\mathrm{lat'}} ) +
            \\cos(A_{\\mathrm{lat'}}) \\cos( B_{\\mathrm{lat'}} )
            \\cos( B_{\\mathrm{lon'}} - A_{\\mathrm{lon'}} ) )
:param a: Location point A
:type a: :py:class:`pyrocko.orthodrome.Loc`
:param b: Location point B
:type b: :py:class:`pyrocko.orthodrome.Loc`
:return: cosdelta
:rtype: float
'''
alat, alon, blat, blon = _latlon_pair(args)
return min(
1.0,
math.sin(alat*d2r) * math.sin(blat*d2r) +
math.cos(alat*d2r) * math.cos(blat*d2r) *
math.cos(d2r*(blon-alon)))
def cosdelta_numpy(a_lats, a_lons, b_lats, b_lons):
'''Cosine of the angular distance between two points ``a`` and ``b``
on a sphere.
This function returns the cosines of the distance
angles *delta* between two points ``a`` and ``b`` given as
:py:class:`numpy.ndarray`.
The coordinates are expected to be given in geographical coordinates
and in degrees. For numerical stability a maximum of ``1.0`` is enforced.
Please find the details of the implementation in the documentation of
the function :py:func:`pyrocko.orthodrome.cosdelta` above.
:param a_lats: Latitudes (degree) point A
:param a_lons: Longitudes (degree) point A
:param b_lats: Latitudes (degree) point B
:param b_lons: Longitudes (degree) point B
:type a_lats: :py:class:`numpy.ndarray`
:type a_lons: :py:class:`numpy.ndarray`
:type b_lats: :py:class:`numpy.ndarray`
:type b_lons: :py:class:`numpy.ndarray`
:return: cosdelta
:type b_lons: :py:class:`numpy.ndarray`, ``(N)``
'''
return num.minimum(
1.0,
num.sin(a_lats*d2r) * num.sin(b_lats*d2r) +
num.cos(a_lats*d2r) * num.cos(b_lats*d2r) *
num.cos(d2r*(b_lons-a_lons)))
def azimuth(*args):
'''Azimuth calculation
    This function (find implementation below) returns the azimuth
    (*track angle*) from point ``a`` towards point ``b``, coordinates of
    which are expected to be given in geographical coordinates and in degrees.
.. math::
A_{\\mathrm{lat'}} = \\frac{ \\pi}{180} \\cdot A_{lat}, \\quad
A_{\\mathrm{lon'}} = \\frac{ \\pi}{180} \\cdot A_{lon}, \\quad
B_{\\mathrm{lat'}} = \\frac{ \\pi}{180} \\cdot B_{lat}, \\quad
B_{\\mathrm{lon'}} = \\frac{ \\pi}{180} \\cdot B_{lon}\\\\
\\varphi_{\\mathrm{azi},AB} = \\frac{180}{\\pi} \\arctan \\left[
\\frac{
\\cos( A_{\\mathrm{lat'}}) \\cos( B_{\\mathrm{lat'}} )
\\sin(B_{\\mathrm{lon'}} - A_{\\mathrm{lon'}} )}
                {\\sin ( B_{\\mathrm{lat'}} ) - \\sin( A_{\\mathrm{lat'}} )
                \\cos(\\Delta) } \\right]
:param a: Location point A
:type a: :py:class:`pyrocko.orthodrome.Loc`
:param b: Location point B
:type b: :py:class:`pyrocko.orthodrome.Loc`
:return: Azimuth in degree
'''
alat, alon, blat, blon = _latlon_pair(args)
return r2d*math.atan2(
math.cos(alat*d2r) * math.cos(blat*d2r) *
math.sin(d2r*(blon-alon)),
math.sin(d2r*blat) - math.sin(d2r*alat) * cosdelta(
alat, alon, blat, blon))
def azimuth_numpy(a_lats, a_lons, b_lats, b_lons, _cosdelta=None):
'''Calculation of the azimuth (*track angle*) from a location A towards B.
This function returns azimuths (*track angles*) from locations A towards B
given in :py:class:`numpy.ndarray`. Coordinates are expected to be given in
geographical coordinates and in degrees.
Please find the details of the implementation in the documentation of the
function :py:func:`pyrocko.orthodrome.azimuth`.
:param a_lats: Latitudes (degree) point A
:param a_lons: Longitudes (degree) point A
:param b_lats: Latitudes (degree) point B
:param b_lons: Longitudes (degree) point B
:type a_lats: :py:class:`numpy.ndarray`, ``(N)``
:type a_lons: :py:class:`numpy.ndarray`, ``(N)``
:type b_lats: :py:class:`numpy.ndarray`, ``(N)``
:type b_lons: :py:class:`numpy.ndarray`, ``(N)``
:return: Azimuths in degrees
:rtype: :py:class:`numpy.ndarray`, ``(N)``
'''
if _cosdelta is None:
_cosdelta = cosdelta_numpy(a_lats, a_lons, b_lats, b_lons)
return r2d*num.arctan2(
num.cos(a_lats*d2r) * num.cos(b_lats*d2r) *
num.sin(d2r*(b_lons-a_lons)),
num.sin(d2r*b_lats) - num.sin(d2r*a_lats) * _cosdelta)
def azibazi(*args, **kwargs):
alat, alon, blat, blon = _latlon_pair(args)
if alat == blat and alon == blon:
return 0., 180.
implementation = kwargs.get('implementation', 'c')
assert implementation in ('c', 'python')
if implementation == 'c':
from pyrocko import orthodrome_ext
return orthodrome_ext.azibazi(alat, alon, blat, blon)
cd = cosdelta(alat, alon, blat, blon)
azi = r2d*math.atan2(
math.cos(alat*d2r) * math.cos(blat*d2r) *
math.sin(d2r*(blon-alon)),
math.sin(d2r*blat) - math.sin(d2r*alat) * cd)
bazi = r2d*math.atan2(
math.cos(blat*d2r) * math.cos(alat*d2r) *
math.sin(d2r*(alon-blon)),
math.sin(d2r*alat) - math.sin(d2r*blat) * cd)
return azi, bazi
def azibazi_numpy(a_lats, a_lons, b_lats, b_lons, implementation='c'):
a_lats, a_lons, b_lats, b_lons = float_array_broadcast(
a_lats, a_lons, b_lats, b_lons)
assert implementation in ('c', 'python')
if implementation == 'c':
from pyrocko import orthodrome_ext
return orthodrome_ext.azibazi_numpy(a_lats, a_lons, b_lats, b_lons)
_cosdelta = cosdelta_numpy(a_lats, a_lons, b_lats, b_lons)
azis = azimuth_numpy(a_lats, a_lons, b_lats, b_lons, _cosdelta)
bazis = azimuth_numpy(b_lats, b_lons, a_lats, a_lons, _cosdelta)
eq = num.logical_and(a_lats == b_lats, a_lons == b_lons)
ii_eq = num.where(eq)[0]
azis[ii_eq] = 0.0
bazis[ii_eq] = 180.0
return azis, bazis
def azidist_numpy(*args):
'''Calculation of the azimuth (*track angle*) and the distance from
locations A towards B on a sphere.
The assisting functions used are :py:func:`pyrocko.orthodrome.cosdelta` and
:py:func:`pyrocko.orthodrome.azimuth`
:param a_lats: Latitudes (degree) point A
:param a_lons: Longitudes (degree) point A
:param b_lats: Latitudes (degree) point B
:param b_lons: Longitudes (degree) point B
:type a_lats: :py:class:`numpy.ndarray`, ``(N)``
:type a_lons: :py:class:`numpy.ndarray`, ``(N)``
:type b_lats: :py:class:`numpy.ndarray`, ``(N)``
:type b_lons: :py:class:`numpy.ndarray`, ``(N)``
:return: Azimuths in degrees, distances in degrees
:rtype: :py:class:`numpy.ndarray`, ``(2xN)``
'''
_cosdelta = cosdelta_numpy(*args)
_azimuths = azimuth_numpy(_cosdelta=_cosdelta, *args)
return _azimuths, r2d*num.arccos(_cosdelta)
def distance_accurate50m(*args, **kwargs):
''' Accurate distance calculation based on a spheroid of rotation.
Function returns distance in meter between points A and B, coordinates of
which must be given in geographical coordinates and in degrees.
The returned distance should be accurate to 50 m using WGS84.
Values for the Earth's equator radius and the Earth's oblateness
(``f_oblate``) are defined in the pyrocko configuration file
:py:class:`pyrocko.config`.
From wikipedia (http://de.wikipedia.org/wiki/Orthodrome), based on:
``Meeus, J.: Astronomical Algorithms, S 85, Willmann-Bell,
Richmond 2000 (2nd ed., 2nd printing), ISBN 0-943396-61-1``
.. math::
F = \\frac{\\pi}{180}
\\frac{(A_{lat} + B_{lat})}{2}, \\quad
G = \\frac{\\pi}{180}
\\frac{(A_{lat} - B_{lat})}{2}, \\quad
l = \\frac{\\pi}{180}
\\frac{(A_{lon} - B_{lon})}{2} \\quad
\\\\[0.5cm]
S = \\sin^2(G) \\cdot \\cos^2(l) +
\\cos^2(F) \\cdot \\sin^2(l), \\quad \\quad
C = \\cos^2(G) \\cdot \\cos^2(l) +
\\sin^2(F) \\cdot \\sin^2(l)
.. math::
w = \\arctan \\left( \\sqrt{ \\frac{S}{C}} \\right) , \\quad
r = \\frac{\\sqrt{S \\cdot C}}{w}
The spherical-earth distance D between A and B, can be given with:
.. math::
D_{sphere} = 2w \\cdot R_{equator}
The oblateness of the Earth requires some correction with
correction factors h1 and h2:
.. math::
h_1 = \\frac{3r - 1}{2C}, \\quad
h_2 = \\frac{3r +1 }{2S}\\\\[0.5cm]
D = D_{\\mathrm{sphere}} \\cdot [ 1 + h_1 \\,f_{\\mathrm{oblate}}
\\cdot \\sin^2(F)
\\cos^2(G) - h_2\\, f_{\\mathrm{oblate}}
\\cdot \\cos^2(F) \\sin^2(G)]
:param a: Location point A
:type a: :py:class:`pyrocko.orthodrome.Loc`
:param b: Location point B
:type b: :py:class:`pyrocko.orthodrome.Loc`
:return: Distance in meter
:rtype: float
'''
alat, alon, blat, blon = _latlon_pair(args)
implementation = kwargs.get('implementation', 'c')
assert implementation in ('c', 'python')
if implementation == 'c':
from pyrocko import orthodrome_ext
return orthodrome_ext.distance_accurate50m(alat, alon, blat, blon)
f = (alat + blat)*d2r / 2.
g = (alat - blat)*d2r / 2.
h = (alon - blon)*d2r / 2.
s = math.sin(g)**2 * math.cos(h)**2 + math.cos(f)**2 * math.sin(h)**2
c = math.cos(g)**2 * math.cos(h)**2 + math.sin(f)**2 * math.sin(h)**2
w = math.atan(math.sqrt(s/c))
if w == 0.0:
return 0.0
r = math.sqrt(s*c)/w
d = 2.*w*earthradius_equator
h1 = (3.*r-1.)/(2.*c)
h2 = (3.*r+1.)/(2.*s)
return d * (1. +
earth_oblateness * h1 * math.sin(f)**2 * math.cos(g)**2 -
earth_oblateness * h2 * math.cos(f)**2 * math.sin(g)**2)
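# --- Added usage sketch (not part of the original module); illustrative
# coordinates only, the exact value depends on the configured earth
# parameters.
def _example_distance_accurate50m():
    '''Spheroidal distance in meters between Hamburg (53.55 N, 9.99 E) and
    Berlin (52.52 N, 13.40 E); expected to be roughly 2.55e5 m.'''
    return distance_accurate50m(53.55, 9.99, 52.52, 13.40)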
def distance_accurate50m_numpy(
a_lats, a_lons, b_lats, b_lons, implementation='c'):
''' Accurate distance calculation based on a spheroid of rotation.
Function returns distance in meter between points ``a`` and ``b``,
coordinates of which must be given in geographical coordinates and in
degrees.
The returned distance should be accurate to 50 m using WGS84.
Values for the Earth's equator radius and the Earth's oblateness
(``f_oblate``) are defined in the pyrocko configuration file
:py:class:`pyrocko.config`.
From wikipedia (http://de.wikipedia.org/wiki/Orthodrome), based on:
``Meeus, J.: Astronomical Algorithms, S 85, Willmann-Bell,
Richmond 2000 (2nd ed., 2nd printing), ISBN 0-943396-61-1``
.. math::
F_i = \\frac{\\pi}{180}
\\frac{(a_{lat,i} + b_{lat,i})}{2}, \\quad
G_i = \\frac{\\pi}{180}
\\frac{(a_{lat,i} - b_{lat,i})}{2}, \\quad
l_i= \\frac{\\pi}{180}
\\frac{(a_{lon,i} - b_{lon,i})}{2} \\\\[0.5cm]
S_i = \\sin^2(G_i) \\cdot \\cos^2(l_i) +
\\cos^2(F_i) \\cdot \\sin^2(l_i), \\quad \\quad
C_i = \\cos^2(G_i) \\cdot \\cos^2(l_i) +
\\sin^2(F_i) \\cdot \\sin^2(l_i)
.. math::
w_i = \\arctan \\left( \\sqrt{\\frac{S_i}{C_i}} \\right), \\quad
r_i = \\frac{\\sqrt{S_i \\cdot C_i}}{w_i}
The spherical-earth distance ``D`` between ``a`` and ``b``,
can be given with:
.. math::
D_{\\mathrm{sphere},i} = 2w_i \\cdot R_{\\mathrm{equator}}
The oblateness of the Earth requires some correction with
correction factors ``h1`` and ``h2``:
.. math::
h_{1,i} = \\frac{3r_i - 1}{2C_i}, \\quad
h_{2,i} = \\frac{3r_i + 1}{2S_i}\\\\[0.5cm]
D_{AB,i} = D_{\\mathrm{sphere},i} \\cdot [1 + h_{1,i}
\\,f_{\\mathrm{oblate}}
\\cdot \\sin^2(F_i)
\\cos^2(G_i) - h_{2,i}\\, f_{\\mathrm{oblate}}
\\cdot \\cos^2(F_i) \\sin^2(G_i)]
:param a_lats: Latitudes (degree) point A
:param a_lons: Longitudes (degree) point A
:param b_lats: Latitudes (degree) point B
:param b_lons: Longitudes (degree) point B
:type a_lats: :py:class:`numpy.ndarray`, ``(N)``
:type a_lons: :py:class:`numpy.ndarray`, ``(N)``
:type b_lats: :py:class:`numpy.ndarray`, ``(N)``
:type b_lons: :py:class:`numpy.ndarray`, ``(N)``
:return: Distances in meter
:rtype: :py:class:`numpy.ndarray`, ``(N)``
'''
a_lats, a_lons, b_lats, b_lons = float_array_broadcast(
a_lats, a_lons, b_lats, b_lons)
assert implementation in ('c', 'python')
if implementation == 'c':
from pyrocko import orthodrome_ext
return orthodrome_ext.distance_accurate50m_numpy(
a_lats, a_lons, b_lats, b_lons)
eq = num.logical_and(a_lats == b_lats, a_lons == b_lons)
ii_neq = num.where(num.logical_not(eq))[0]
if num.all(eq):
return num.zeros_like(eq, dtype=num.float)
def extr(x):
if isinstance(x, num.ndarray) and x.size > 1:
return x[ii_neq]
else:
return x
a_lats = extr(a_lats)
a_lons = extr(a_lons)
b_lats = extr(b_lats)
b_lons = extr(b_lons)
f = (a_lats + b_lats)*d2r / 2.
g = (a_lats - b_lats)*d2r / 2.
h = (a_lons - b_lons)*d2r / 2.
s = num.sin(g)**2 * num.cos(h)**2 + num.cos(f)**2 * num.sin(h)**2
c = num.cos(g)**2 * num.cos(h)**2 + num.sin(f)**2 * num.sin(h)**2
w = num.arctan(num.sqrt(s/c))
r = num.sqrt(s*c)/w
d = 2.*w*earthradius_equator
h1 = (3.*r-1.)/(2.*c)
h2 = (3.*r+1.)/(2.*s)
dists = num.zeros(eq.size, dtype=num.float)
dists[ii_neq] = d * (
1. +
earth_oblateness * h1 * num.sin(f)**2 * num.cos(g)**2 -
earth_oblateness * h2 * num.cos(f)**2 * num.sin(g)**2)
return dists
def ne_to_latlon(lat0, lon0, north_m, east_m):
'''Transform local cartesian coordinates to latitude and longitude.
From east and north coordinates (``x`` and ``y`` coordinate
:py:class:`numpy.ndarray`) relative to a reference differences in
longitude and latitude are calculated, which are effectively changes in
azimuth and distance, respectively:
.. math::
\\text{distance change:}\\; \\Delta {\\bf{a}} &= \\sqrt{{\\bf{y}}^2 +
{\\bf{x}}^2 }/ \\mathrm{R_E},
\\text{azimuth change:}\\; \\Delta \\bf{\\gamma} &= \\arctan( \\bf{x}
/ \\bf{y}).
The projection used preserves the azimuths of the input points.
:param lat0: Latitude origin of the cartesian coordinate system.
:param lon0: Longitude origin of the cartesian coordinate system.
:param north_m: Northing distances from origin in meters.
:param east_m: Easting distances from origin in meters.
:type north_m: :py:class:`numpy.ndarray`, ``(N)``
:type east_m: :py:class:`numpy.ndarray`, ``(N)``
:type lat0: float
:type lon0: float
:return: Array with latitudes and longitudes
:rtype: :py:class:`numpy.ndarray`, ``(2xN)``
'''
a = num.sqrt(north_m**2+east_m**2)/earthradius
gamma = num.arctan2(east_m, north_m)
return azidist_to_latlon_rad(lat0, lon0, gamma, a)
def azidist_to_latlon(lat0, lon0, azimuth_deg, distance_deg):
'''Pass-through wrapper: convert azimuth and distance from degrees to
radians and delegate to :py:func:`azidist_to_latlon_rad`.
'''
return azidist_to_latlon_rad(
lat0, lon0, azimuth_deg/180.*num.pi, distance_deg/180.*num.pi)
def azidist_to_latlon_rad(lat0, lon0, azimuth_rad, distance_rad):
''' Absolute latitudes and longitudes are calculated from relative changes.
For numerical stability a range between ``-1.0`` and ``1.0`` is
enforced for ``c`` and ``alpha``.
.. math::
\\Delta {\\bf a}_i \\; \\text{and} \\; \\Delta \\gamma_i \\;
\\text{are relative distances and azimuths from lat0 and lon0 for
\\textit{i} source points of a finite source.}
.. math::
\\mathrm{b} &= \\frac{\\pi}{2} -\\frac{\\pi}{180}\\;\\mathrm{lat_0}\\\\
{\\bf c}_i &=\\arccos[\\; \\cos(\\Delta {\\bf{a}}_i)
\\cos(\\mathrm{b}) + \\cos(|\\Delta \\gamma_i|) \\,
\\sin(\\Delta {\\bf a}_i)
\\sin(\\mathrm{b})\\; ] \\\\
\\mathrm{lat}_i &= \\frac{180}{\\pi}
\\left(\\frac{\\pi}{2} - {\\bf c}_i \\right)
.. math::
\\alpha_i &= \\arcsin \\left[ \\; \\frac{ \\sin(\\Delta {\\bf a}_i )
\\sin(|\\Delta \\gamma_i|)}{\\sin({\\bf c}_i)}\\;
\\right] \\\\
\\alpha_i &= \\begin{cases}
\\alpha_i, &\\text{if} \\; \\cos(\\Delta {\\bf a}_i) -
\\cos(\\mathrm{b}) \\cos({\\bf{c}}_i) > 0, \\;
\\text{else} \\\\
\\pi - \\alpha_i, & \\text{if} \\; \\alpha_i > 0,\\;
\\text{else}\\\\
-\\pi - \\alpha_i, & \\text{if} \\; \\alpha_i < 0.
\\end{cases} \\\\
\\mathrm{lon}_i &= \\mathrm{lon_0} +
\\frac{180}{\\pi} \\,
\\frac{\\Delta \\gamma_i }{|\\Delta \\gamma_i|}
\\cdot \\alpha_i
\\text{, with $\\alpha_i \\in [-\\pi,\\pi]$}
:param lat0: Latitude origin of the cartesian coordinate system.
:param lon0: Longitude origin of the cartesian coordinate system.
:param distance_rad: Distances from origin in radians.
:param azimuth_rad: Azimuths in radians.
:type distance_rad: :py:class:`numpy.ndarray`, ``(N)``
:type azimuth_rad: :py:class:`numpy.ndarray`, ``(N)``
:type lat0: float
:type lon0: float
:return: Array with latitudes and longitudes
:rtype: :py:class:`numpy.ndarray`, ``(2xN)``
'''
a = distance_rad
gamma = azimuth_rad
b = math.pi/2.-lat0*d2r
alphasign = 1.
alphasign = num.where(gamma < 0, -1., 1.)
gamma = num.abs(gamma)
c = num.arccos(clip(
num.cos(a)*num.cos(b)+num.sin(a)*num.sin(b)*num.cos(gamma), -1., 1.))
alpha = num.arcsin(clip(
num.sin(a)*num.sin(gamma)/num.sin(c), -1., 1.))
alpha = num.where(
num.cos(a)-num.cos(b)*num.cos(c) < 0,
num.where(alpha > 0, math.pi-alpha, -math.pi-alpha),
alpha)
lat = r2d * (math.pi/2. - c)
lon = wrap(lon0 + r2d*alpha*alphasign, -180., 180.)
return lat, lon
def ne_to_latlon_alternative_method(lat0, lon0, north_m, east_m):
'''Transform local cartesian coordinates to latitude and longitude.
Like :py:func:`pyrocko.orthodrome.ne_to_latlon`,
but this method, although it should be numerically more stable,
suffers from problems at points which are *across the pole*
as seen from the cartesian origin.
.. math::
\\text{distance change:}\\; \\Delta {{\\bf a}_i} &=
\\sqrt{{\\bf{y}}^2_i + {\\bf{x}}^2_i }/ \\mathrm{R_E},\\\\
\\text{azimuth change:}\\; \\Delta {\\bf \\gamma}_i &=
\\arctan( {\\bf x}_i / {\\bf y}_i). \\\\
\\mathrm{b} &=
\\frac{\\pi}{2} -\\frac{\\pi}{180} \\;\\mathrm{lat_0}\\\\
.. math::
{{\\bf z}_1}_i &= \\cos{\\left( \\frac{\\Delta {\\bf a}_i -
\\mathrm{b}}{2} \\right)}
\\cos {\\left( \\frac{|\\gamma_i|}{2} \\right) }\\\\
{{\\bf n}_1}_i &= \\cos{\\left( \\frac{\\Delta {\\bf a}_i +
\\mathrm{b}}{2} \\right)}
\\sin {\\left( \\frac{|\\gamma_i|}{2} \\right) }\\\\
{{\\bf z}_2}_i &= \\sin{\\left( \\frac{\\Delta {\\bf a}_i -
\\mathrm{b}}{2} \\right)}
\\cos {\\left( \\frac{|\\gamma_i|}{2} \\right) }\\\\
{{\\bf n}_2}_i &= \\sin{\\left( \\frac{\\Delta {\\bf a}_i +
\\mathrm{b}}{2} \\right)}
\\sin {\\left( \\frac{|\\gamma_i|}{2} \\right) }\\\\
{{\\bf t}_1}_i &= \\arctan{\\left( \\frac{{{\\bf z}_1}_i}
{{{\\bf n}_1}_i} \\right) }\\\\
{{\\bf t}_2}_i &= \\arctan{\\left( \\frac{{{\\bf z}_2}_i}
{{{\\bf n}_2}_i} \\right) } \\\\[0.5cm]
c &= \\begin{cases}
2 \\cdot \\arccos \\left( {{\\bf z}_1}_i / \\sin({{\\bf t}_1}_i)
\\right),\\; \\text{if }
|\\sin({{\\bf t}_1}_i)| >
|\\sin({{\\bf t}_2}_i)|,\\; \\text{else} \\\\
2 \\cdot \\arcsin{\\left( {{\\bf z}_2}_i /
\\sin({{\\bf t}_2}_i) \\right)}.
\\end{cases}\\\\
.. math::
{\\bf {lat}}_i &= \\frac{180}{ \\pi } \\left( \\frac{\\pi}{2}
- {\\bf {c}}_i \\right) \\\\
{\\bf {lon}}_i &= {\\bf {lon}}_0 + \\frac{180}{ \\pi }
\\frac{\\gamma_i}{|\\gamma_i|} \\cdot \\alpha_i,
\\text{ with}\\; \\gamma_i \\in [-\\pi,\\pi]
:param lat0: Latitude origin of the cartesian coordinate system.
:param lon0: Longitude origin of the cartesian coordinate system.
:param north_m: Northing distances from origin in meters.
:param east_m: Easting distances from origin in meters.
:type north_m: :py:class:`numpy.ndarray`, ``(N)``
:type east_m: :py:class:`numpy.ndarray`, ``(N)``
:type lat0: float
:type lon0: float
:return: Array with latitudes and longitudes
:rtype: :py:class:`numpy.ndarray`, ``(2xN)``
'''
b = math.pi/2.-lat0*d2r
a = num.sqrt(north_m**2+east_m**2)/earthradius
gamma = num.arctan2(east_m, north_m)
alphasign = 1.
alphasign = num.where(gamma < 0., -1., 1.)
gamma = num.abs(gamma)
z1 = num.cos((a-b)/2.)*num.cos(gamma/2.)
n1 = num.cos((a+b)/2.)*num.sin(gamma/2.)
z2 = num.sin((a-b)/2.)*num.cos(gamma/2.)
n2 = num.sin((a+b)/2.)*num.sin(gamma/2.)
t1 = num.arctan2(z1, n1)
t2 = num.arctan2(z2, n2)
alpha = t1 + t2
sin_t1 = num.sin(t1)
sin_t2 = num.sin(t2)
c = num.where(
num.abs(sin_t1) > num.abs(sin_t2),
num.arccos(z1/sin_t1)*2.,
num.arcsin(z2/sin_t2)*2.)
lat = r2d * (math.pi/2. - c)
lon = wrap(lon0 + r2d*alpha*alphasign, -180., 180.)
return lat, lon
def latlon_to_ne(*args):
'''Relative cartesian coordinates with respect to a reference location.
For two locations, a reference location A and another location B, given in
geographical coordinates in degrees, the corresponding cartesian
coordinates are calculated.
Assisting functions are :py:func:`pyrocko.orthodrome.azimuth` and
:py:func:`pyrocko.orthodrome.distance_accurate50m`.
.. math::
D_{AB} &= \\mathrm{distance\\_accurate50m(}A, B \\mathrm{)}, \\quad
\\varphi_{\\mathrm{azi},AB} = \\mathrm{azimuth(}A,B
\\mathrm{)}\\\\[0.3cm]
n &= D_{AB} \\cdot \\cos( \\frac{\\pi }{180}
\\varphi_{\\mathrm{azi},AB} )\\\\
e &= D_{AB} \\cdot
\\sin( \\frac{\\pi }{180} \\varphi_{\\mathrm{azi},AB})
:param refloc: Location reference point
:type refloc: :py:class:`pyrocko.orthodrome.Loc`
:param loc: Location of interest
:type loc: :py:class:`pyrocko.orthodrome.Loc`
:return: Northing and easting from refloc to location
:rtype: tuple, float
'''
azi = azimuth(*args)
dist = distance_accurate50m(*args)
n, e = math.cos(azi*d2r)*dist, math.sin(azi*d2r)*dist
return n, e
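# --- Added usage sketch (not part of the original module): a round trip
# between geographical and local cartesian coordinates; input values are
# illustrative.
def _example_latlon_ne_roundtrip():
    '''Project (10.2 N, 20.3 E) into the (north, east) frame of the
    reference point (10.0 N, 20.0 E) and back; the recovered coordinates
    should agree with the input to within a small error.'''
    lat0, lon0 = 10.0, 20.0
    n, e = latlon_to_ne(lat0, lon0, 10.2, 20.3)
    lat1, lon1 = ne_to_latlon(lat0, lon0, n, e)
    return lat1, lon1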
def latlon_to_ne_numpy(lat0, lon0, lat, lon):
'''Relative cartesian coordinates with respect to a reference location.
For two locations, a reference location (``lat0``, ``lon0``) and another
location B, given in geographical coordinates in degrees,
the corresponding cartesian coordinates are calculated.
Assisting functions are :py:func:`azimuth`
and :py:func:`distance_accurate50m`.
:param lat0: reference location latitude
:param lon0: reference location longitude
:param lat: absolute location latitude
:param lon: absolute location longitude
:return: ``(n, e)``: relative north and east positions
:rtype: :py:class:`numpy.ndarray`, ``(2xN)``
Implemented formulations:
.. math::
D_{AB} &= \\mathrm{distance\\_accurate50m(}A, B \\mathrm{)}, \\quad
\\varphi_{\\mathrm{azi},AB} = \\mathrm{azimuth(}A,B
\\mathrm{)}\\\\[0.3cm]
n &= D_{AB} \\cdot \\cos( \\frac{\\pi }{180} \\varphi_{
\\mathrm{azi},AB} )\\\\
e &= D_{AB} \\cdot \\sin( \\frac{\\pi }{180} \\varphi_{
\\mathrm{azi},AB} )
'''
azi = azimuth_numpy(lat0, lon0, lat, lon)
dist = distance_accurate50m_numpy(lat0, lon0, lat, lon)
n = num.cos(azi*d2r)*dist
e = num.sin(azi*d2r)*dist
return n, e
_wgs84 = None
def get_wgs84():
global _wgs84
if _wgs84 is None:
from geographiclib.geodesic import Geodesic
_wgs84 = Geodesic.WGS84
return _wgs84
def amap(n):
def wrap(f):
if n == 1:
def func(*args):
it = num.nditer(args + (None,))
for ops in it:
ops[-1][...] = f(*ops[:-1])
return it.operands[-1]
elif n == 2:
def func(*args):
it = num.nditer(args + (None, None))
for ops in it:
ops[-2][...], ops[-1][...] = f(*ops[:-2])
return it.operands[-2], it.operands[-1]
return func
return wrap
@amap(2)
def ne_to_latlon2(lat0, lon0, north_m, east_m):
wgs84 = get_wgs84()
az = num.arctan2(east_m, north_m)*r2d
dist = num.sqrt(east_m**2 + north_m**2)
x = wgs84.Direct(lat0, lon0, az, dist)
return x['lat2'], x['lon2']
@amap(2)
def latlon_to_ne2(lat0, lon0, lat1, lon1):
wgs84 = get_wgs84()
x = wgs84.Inverse(lat0, lon0, lat1, lon1)
dist = x['s12']
az = x['azi1']
n = num.cos(az*d2r)*dist
e = num.sin(az*d2r)*dist
return n, e
@amap(1)
def distance_accurate15nm(lat1, lon1, lat2, lon2):
wgs84 = get_wgs84()
return wgs84.Inverse(lat1, lon1, lat2, lon2)['s12']
def positive_region(region):
'''Normalize parameterization of a rectangular geographical region.
:param region: ``(west, east, south, north)``
:returns: ``(west, east, south, north)``, where ``west <= east`` and
where ``west`` and ``east`` are in the range ``[-180., 180.+360.[``
'''
west, east, south, north = [float(x) for x in region]
assert -180. - 360. <= west < 180.
assert -180. < east <= 180. + 360.
assert -90. <= south < 90.
assert -90. < north <= 90.
if east < west:
east += 360.
if west < -180.:
west += 360.
east += 360.
return (west, east, south, north)
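# --- Added usage sketch (not part of the original module): normalizing a
# region that crosses the dateline.
def _example_positive_region():
    '''A region reaching from 170 E across the dateline to 170 W is
    normalized so that west <= east; the expected result is
    (170., 190., -10., 10.).'''
    return positive_region((170., -170., -10., 10.))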
def points_in_region(p, region):
'''
Check what points are contained in a rectangular geographical region.
:param p: NumPy array of shape ``(N, 2)`` where each row is a
``(lat, lon)`` pair [deg]
:param region: ``(west, east, south, north)`` [deg]
:returns: NumPy array of shape ``(N)``, type ``bool``
'''
w, e, s, n = positive_region(region)
return num.logical_and(
num.logical_and(s <= p[:, 0], p[:, 0] <= n),
num.logical_or(
num.logical_and(w <= p[:, 1], p[:, 1] <= e),
num.logical_and(w-360. <= p[:, 1], p[:, 1] <= e-360.)))
def point_in_region(p, region):
'''
Check if a point is contained in a rectangular geographical region.
:param p: ``(lat, lon)`` [deg]
:param region: ``(west, east, south, north)`` [deg]
:returns: ``bool``
'''
w, e, s, n = positive_region(region)
return num.logical_and(
num.logical_and(s <= p[0], p[0] <= n),
num.logical_or(
num.logical_and(w <= p[1], p[1] <= e),
num.logical_and(w-360. <= p[1], p[1] <= e-360.)))
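# --- Added usage sketch (not part of the original module): testing single
# points against a dateline-crossing region.
def _example_point_in_region():
    '''(0 N, 180 E) should be reported as inside the region below,
    (0 N, 0 E) as outside.'''
    region = (170., -170., -10., 10.)
    return point_in_region((0., 180.), region), \
        point_in_region((0., 0.), region)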
def radius_to_region(lat, lon, radius):
'''
Get a rectangular region which fully contains a given circular region.
:param lat,lon: center of circular region [deg]
:param radius: radius of circular region [m]
:return: rectangular region as ``(west, east, south, north)`` [deg]
'''
radius_deg = radius * m2d
if radius_deg < 45.:
lat_min = max(-90., lat - radius_deg)
lat_max = min(90., lat + radius_deg)
absmaxlat = max(abs(lat_min), abs(lat_max))
if absmaxlat > 89:
lon_min = -180.
lon_max = 180.
else:
lon_min = max(
-180. - 360.,
lon - radius_deg / math.cos(absmaxlat*d2r))
lon_max = min(
180. + 360.,
lon + radius_deg / math.cos(absmaxlat*d2r))
lon_min, lon_max, lat_min, lat_max = positive_region(
(lon_min, lon_max, lat_min, lat_max))
return lon_min, lon_max, lat_min, lat_max
else:
return None
def geographic_midpoint(lats, lons, weights=None):
'''Calculate geographic midpoints by finding the center of gravity.
This method suffers from instabilities if points are centered around the
poles.
:param lats: array of latitudes
:param lons: array of longitudes
:param weights: array weighting factors (optional)
:type lats: :py:class:`numpy.ndarray`, ``(N)``
:type lons: :py:class:`numpy.ndarray`, ``(N)``
:type weights: :py:class:`numpy.ndarray`, ``(N)``
:return: Latitude and longitude of the midpoint
:rtype: tuple, float
'''
if weights is None:
weights = num.ones(len(lats))
total_weight = num.sum(weights)
weights /= total_weight
lats = lats * d2r
lons = lons * d2r
x = num.sum(num.cos(lats) * num.cos(lons) * weights)
y = num.sum(num.cos(lats) * num.sin(lons) * weights)
z = num.sum(num.sin(lats) * weights)
lon = num.arctan2(y, x)
hyp = num.sqrt(x**2 + y**2)
lat = num.arctan2(z, hyp)
return lat/d2r, lon/d2r
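# --- Added usage sketch (not part of the original module); relies on the
# module-level ``num`` (numpy) import.
def _example_geographic_midpoint():
    '''The midpoint of (0 N, 10 W) and (0 N, 10 E) should come out close
    to (0 N, 0 E).'''
    lats = num.array([0., 0.])
    lons = num.array([-10., 10.])
    return geographic_midpoint(lats, lons)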
def geographic_midpoint_locations(locations, weights=None):
coords = num.array([loc.effective_latlon
for loc in locations])
return geographic_midpoint(coords[:, 0], coords[:, 1], weights)
def geodetic_to_ecef(lat, lon, alt):
'''
Convert geodetic coordinates to Earth-Centered, Earth-Fixed (ECEF)
Cartesian coordinates. [#1]_ [#2]_
:param lat: Geodetic latitude in [deg].
:param lon: Geodetic longitude in [deg].
:param alt: Geodetic altitude (height) in [m] (positive for points outside
the geoid).
:type lat: float
:type lon: float
:type alt: float
:return: ECEF Cartesian coordinates (X, Y, Z) in [m].
:rtype: tuple, float
.. [#1] https://en.wikipedia.org/wiki/ECEF
.. [#2] https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
#From_geodetic_to_ECEF_coordinates
'''
f = earth_oblateness
a = earthradius_equator
e2 = 2*f - f**2
lat, lon = num.radians(lat), num.radians(lon)
# Normal (plumb line)
N = a / num.sqrt(1.0 - (e2 * num.sin(lat)**2))
X = (N+alt) * num.cos(lat) * num.cos(lon)
Y = (N+alt) * num.cos(lat) * num.sin(lon)
Z = (N*(1.0-e2) + alt) * num.sin(lat)
return (X, Y, Z)
def ecef_to_geodetic(X, Y, Z):
'''
Convert Earth-Centered, Earth-Fixed (ECEF) Cartesian coordinates to
geodetic coordinates (Ferrari's solution).
:param X, Y, Z: Cartesian coordinates in ECEF system in [m].
:type X, Y, Z: float
:return: Geodetic coordinates (lat, lon, alt). Latitude and longitude are
in [deg] and altitude is in [m]
(positive for points outside the geoid).
:rtype: tuple, float
.. seealso ::
https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
#The_application_of_Ferrari.27s_solution
'''
f = earth_oblateness
a = earthradius_equator
b = a * (1. - f)
e2 = 2.*f - f**2
# usefull
a2 = a**2
b2 = b**2
e4 = e2**2
X2 = X**2
Y2 = Y**2
Z2 = Z**2
r = num.sqrt(X2 + Y2)
r2 = r**2
e_prime2 = (a2 - b2)/b2
E2 = a2 - b2
F = 54. * b2 * Z2
G = r2 + (1.-e2)*Z2 - (e2*E2)
C = (e4 * F * r2) / (G**3)
S = cbrt(1. + C + num.sqrt(C**2 + 2.*C))
P = F / (3. * (S + 1./S + 1.)**2 * G**2)
Q = num.sqrt(1. + (2.*e4*P))
dum1 = -(P*e2*r) / (1.+Q)
dum2 = 0.5 * a2 * (1. + 1./Q)
dum3 = (P * (1.-e2) * Z2) / (Q * (1.+Q))
dum4 = 0.5 * P * r2
r0 = dum1 + num.sqrt(dum2 - dum3 - dum4)
U = num.sqrt((r - e2*r0)**2 + Z2)
V = num.sqrt((r - e2*r0)**2 + (1.-e2)*Z2)
Z0 = (b2*Z) / (a*V)
alt = U * (1. - (b2 / (a*V)))
lat = num.arctan((Z + e_prime2 * Z0)/r)
lon = num.arctan2(Y, X)
return (lat*r2d, lon*r2d, alt)
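# --- Added usage sketch (not part of the original module): geodetic ->
# ECEF -> geodetic round trip; coordinates are illustrative.
def _example_ecef_roundtrip():
    '''Convert (52.52 N, 13.40 E, 100 m) to ECEF and back; the result
    should be close to the input coordinates.'''
    x, y, z = geodetic_to_ecef(52.52, 13.40, 100.)
    return ecef_to_geodetic(x, y, z)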
class Farside(Exception):
pass
def latlon_to_xyz(latlons):
if latlons.ndim == 1:
return latlon_to_xyz(latlons[num.newaxis, :])[0]
points = num.zeros((latlons.shape[0], 3))
lats = latlons[:, 0]
lons = latlons[:, 1]
points[:, 0] = num.cos(lats*d2r) * num.cos(lons*d2r)
points[:, 1] = num.cos(lats*d2r) * num.sin(lons*d2r)
points[:, 2] = num.sin(lats*d2r)
return points
def xyz_to_latlon(xyz):
if xyz.ndim == 1:
return xyz_to_latlon(xyz[num.newaxis, :])[0]
latlons = num.zeros((xyz.shape[0], 2))
latlons[:, 0] = num.arctan2(
xyz[:, 2], num.sqrt(xyz[:, 0]**2 + xyz[:, 1]**2)) * r2d
latlons[:, 1] = num.arctan2(
xyz[:, 1], xyz[:, 0]) * r2d
return latlons
def rot_to_00(lat, lon):
rot0 = euler_to_matrix(0., -90.*d2r, 0.).A
rot1 = euler_to_matrix(-d2r*lat, 0., -d2r*lon).A
return num.dot(rot0.T, num.dot(rot1, rot0)).T
def distances3d(a, b):
return num.sqrt(num.sum((a-b)**2, axis=a.ndim-1))
def circulation(points2):
return num.sum(
(points2[1:, 0] - points2[:-1, 0])
* (points2[1:, 1] + points2[:-1, 1]))
def stereographic(points):
dists = distances3d(points[1:, :], points[:-1, :])
if dists.size > 0:
maxdist = num.max(dists)
cutoff = maxdist**2 / 2.
else:
cutoff = 1.0e-5
points = points.copy()
if num.any(points[:, 0] < -1. + cutoff):
raise Farside()
points_out = points[:, 1:].copy()
factor = 1.0 / (1.0 + points[:, 0])
points_out *= factor[:, num.newaxis]
return points_out
def stereographic_poly(points):
dists = distances3d(points[1:, :], points[:-1, :])
if dists.size > 0:
maxdist = num.max(dists)
cutoff = maxdist**2 / 2.
else:
cutoff = 1.0e-5
points = points.copy()
if num.any(points[:, 0] < -1. + cutoff):
raise Farside()
points_out = points[:, 1:].copy()
factor = 1.0 / (1.0 + points[:, 0])
points_out *= factor[:, num.newaxis]
if circulation(points_out) >= 0:
raise Farside()
return points_out
def gnomonic_x(points, cutoff=0.01):
points_out = points[:, 1:].copy()
if num.any(points[:, 0] < cutoff):
raise Farside()
factor = 1.0 / points[:, 0]
points_out *= factor[:, num.newaxis]
return points_out
def cneg(i, x):
if i == 1:
return x
else:
return num.logical_not(x)
def contains_points(polygon, points):
'''
Test which points are inside polygon on a sphere.
:param polygon: Point coordinates defining the polygon [deg].
:type polygon: :py:class:`numpy.ndarray` of shape (N, 2), second index
0=lat, 1=lon
:param points: Coordinates of points to test [deg].
:type points: :py:class:`numpy.ndarray` of shape (N, 2), second index
0=lat, 1=lon
:returns: Boolean mask array.
:rtype: :py:class:`numpy.ndarray` of shape (N,)
The inside of the polygon is defined as the area which is to the left hand
side of an observer walking the polygon line, points in order, on the
sphere. Lines between the polygon points are treated as great circle paths.
The polygon may be arbitrarily complex, as long as it does not have any
crossings or thin parts with zero width. The polygon may contain the poles
and is allowed to wrap around the sphere multiple times.
The algorithm works by consecutive cutting of the polygon into (almost)
hemispheres and subsequent Gnomonic projections to perform the
point-in-polygon tests on a 2D plane.
'''
and_ = num.logical_and
points_xyz = latlon_to_xyz(points)
mask_x = 0. <= points_xyz[:, 0]
mask_y = 0. <= points_xyz[:, 1]
mask_z = 0. <= points_xyz[:, 2]
result = num.zeros(points.shape[0], dtype=num.int)
for ix in [-1, 1]:
for iy in [-1, 1]:
for iz in [-1, 1]:
mask = and_(
and_(cneg(ix, mask_x), cneg(iy, mask_y)),
cneg(iz, mask_z))
center_xyz = num.array([ix, iy, iz], dtype=num.float)
lat, lon = xyz_to_latlon(center_xyz)
rot = rot_to_00(lat, lon)
points_rot_xyz = num.dot(rot, points_xyz[mask, :].T).T
points_rot_pro = gnomonic_x(points_rot_xyz)
offset = 0.01
poly_xyz = latlon_to_xyz(polygon)
poly_rot_xyz = num.dot(rot, poly_xyz.T).T
poly_rot_xyz[:, 0] -= offset
groups = spoly_cut([poly_rot_xyz], axis=0)
for poly_rot_group_xyz in groups[1]:
poly_rot_group_xyz[:, 0] += offset
poly_rot_group_pro = gnomonic_x(
poly_rot_group_xyz)
if circulation(poly_rot_group_pro) > 0:
result[mask] += path_contains_points(
poly_rot_group_pro, points_rot_pro)
else:
result[mask] -= path_contains_points(
poly_rot_group_pro, points_rot_pro)
return result.astype(num.bool)
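# --- Added usage sketch (not part of the original module): a small
# quadrilateral around the origin, walked so that its interior should lie
# to the left of the path as required above; the closing vertex repeats the
# first one for clarity. Coordinates are illustrative.
def _example_contains_points():
    '''The origin is expected to be reported as inside, (0 N, 20 E) as
    outside.'''
    polygon = num.array([
        [-5., -5.],
        [-5., 5.],
        [5., 5.],
        [5., -5.],
        [-5., -5.]])
    points = num.array([[0., 0.], [0., 20.]])
    return contains_points(polygon, points)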
def contains_point(polygon, point):
'''
Test if point is inside polygon on a sphere.
:param polygon: Point coordinates defining the polygon [deg].
:type polygon: :py:class:`numpy.ndarray` of shape (N, 2), second index
0=lat, 1=lon
:param point: Coordinates ``(lat, lon)`` of point to test [deg].
Convenience wrapper to :py:func:`contains_points` to test a single point.
'''
return bool(
contains_points(polygon, num.asarray(point)[num.newaxis, :])[0])
| gpl-3.0 |
toobaz/pandas | pandas/core/tools/numeric.py | 1 | 6552 | import numpy as np
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_object,
is_datetime_or_timedelta_dtype,
is_decimal,
is_number,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
import pandas as pd
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can be stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
downcast : {'integer', 'signed', 'unsigned', 'float'} , default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
Returns
-------
ret : numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
"""
if downcast not in (None, "integer", "signed", "unsigned", "float"):
raise ValueError("invalid downcasting method provided")
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("invalid error value specified")
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndexClass):
is_index = True
values = arg.asi8
if values is None:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype="O")
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype="O")
elif getattr(arg, "ndim", 1) > 1:
raise TypeError("arg must be a list, tuple, 1-d array, or Series")
else:
values = arg
try:
if is_numeric_dtype(values):
pass
elif is_datetime_or_timedelta_dtype(values):
values = values.astype(np.int64)
else:
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
except Exception:
if errors == "raise":
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values):
typecodes = None
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and np.min(values) >= 0:
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
if np.dtype(dtype).itemsize <= values.dtype.itemsize:
values = maybe_downcast_to_dtype(values, dtype)
# successful conversion
if values.dtype == dtype:
break
if is_series:
return pd.Series(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy_with_infer
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
| bsd-3-clause |
beepee14/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
Garrett-R/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
ratnania/pigasus | python/fit/surfit.py | 1 | 16761 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
from time import time
from pigasus.fem.basicPDE import basicPDE
from caid.cad_geometry import cad_geometry, cad_nurbs
from caid.cad_geometry import square as patch
from caid.core.bspline import bsp
from scipy.io import mmwrite
from scipy.sparse import coo_matrix
from pigasus.fit.utils import *
#-----------------------------------
class surfit(object):
def __init__(self, geometry, uvk=None, PDE=None, constraints=[],
mu=None, alpha=1., rational=0):
"""
initialize the surfit object
PDE is the Differential operator to use for smoothing (usually a 2nd
order)
constraints is a list of dictionaries that must be of the following form
constraints[i] is {'patch_id_m', 'face_m', 'patch_id_s', 'face_s',
'type'}
patch_id_m is the master patch id
face_m is the face id in the master patch
patch_id_s is the slave patch id
face_s is the face id in the slave patch
type is the constraint's type: C1, C2, ... (default: C1)
ib is the starting index in the face element (default:0 )
ie is the ending index in the face element (default:-1 )
"""
self.geometry = geometry
self.postAssembly = False
self.nConstraints = 0
self.ConstIndices = []
self.ConstVals = []
self.ConstRHSs = []
self.constraints = constraints
self.alpha = alpha
self.mu = mu
self.rational = rational
self.Rd = 2 # TODO make it automatic
self.RHS = None
self.Mat = None
if PDE is not None:
self.PDE = PDE
else:
from pigasus.fem.basicPDE import basicPDE
func_zero = lambda x,y : [ 0. ]
func_bip = lambda x,y : [ 1., 0., 0. \
, 0., 2., 0. \
, 0., 0., 1. ]
testcase = {}
testcase['D2'] = func_bip
testcase['u'] = func_zero
testcase['f'] = func_zero
self.PDE = basicPDE(geometry=geometry, testcase=testcase)
# assembly the PDE
self.PDE.assembly()
self.ID_loc = self.PDE.space.connectivity.ID_loc
if uvk is not None:
self.Dt, self.Mat = self.updateMatrix(uvk)
self.postAssembly = True
@property
def system(self):
return self.PDE.system.get()
@property
def space(self):
return self.PDE.space
# ...
def updateMatrix(self, lists_uvk, verbose=False):
lists_uk = lists_uvk[0]
lists_vk = lists_uvk[1]
geo = self.geometry
ID_loc = self.ID_loc
Mat = self.system
Mat_shape = Mat.shape
# print "shape ", Mat_shape
# ... construct the D matrix for the first patch
patch_id = 0
nrb = geo[patch_id]
list_uk = lists_uk[patch_id]
list_vk = lists_vk[patch_id]
ID = ID_loc[patch_id]
# np.savetxt("ID"+str(patch_id)+".txt",np.asarray(ID), fmt='%d')
list_basis = evalBasis2D(nrb, list_uk, list_vk \
, rational=self.rational \
, verbose=verbose)
rows, cols, data = addContributions2D(nrb, list_uk, list_vk \
, list_basis, ID \
, verbose=verbose)
# np.savetxt("rows"+str(patch_id)+".txt", rows, fmt='%d')
# np.savetxt("cols"+str(patch_id)+".txt", cols, fmt='%d')
# ...
# ... compute and update the D matrix for the other patches
for patch_id in range(1, geo.npatchs):
nrb = geo[patch_id]
list_uk = lists_uk[patch_id]
list_vk = lists_vk[patch_id]
ID = ID_loc[patch_id]
# np.savetxt("ID"+str(patch_id)+".txt",np.asarray(ID), fmt='%d')
# ...
list_basis = evalBasis2D(nrb, list_uk, list_vk \
, rational=self.rational \
, verbose=verbose)
rowsk, colsk, datak = addContributions2D(nrb, list_uk, list_vk \
, list_basis, ID \
, verbose=verbose)
colsk = np.asarray(colsk)
colsk += np.max(cols)+1
# np.savetxt("rows"+str(patch_id)+".txt", rowsk, fmt='%d')
# np.savetxt("cols"+str(patch_id)+".txt", colsk, fmt='%d')
rows = np.concatenate([rows, rowsk])
cols = np.concatenate([cols, colsk])
data = np.concatenate([data, datak])
ntotal = len(np.concatenate(lists_uk))
shp = [Mat_shape[0],ntotal]
D = coo_matrix((data, (rows, cols)), shape=shp)
D = D.tocsr()
Dt = D.transpose().tocsr()
DtD = D * Dt
Mk = DtD.transpose().tocsr()
# print "Mat_shape ", Mat_shape
# print "system shape ", self.system.shape
# print "Mk shape ", Mk.shape
if self.mu is None:
Mat_norm = np.linalg.norm(Mat.todense())
Mk_norm = np.linalg.norm(Mk.todense())
mu = Mk_norm / Mat_norm
else:
mu = self.mu
alpha = self.alpha
Mat = Mk + alpha * mu * Mat
# from scipy.io import mmwrite
# mmwrite("Mat.mtx", Mat)
# mmwrite("D.mtx", D)
return D, Mat
# ...
def fit(self, xyzk, uvk=None):
if (not self.postAssembly) and (uvk is None):
print("You must run updateMatrix before!")
return
if (not self.postAssembly) and (uvk is not None):
self.Dt, self.Mat = self.updateMatrix(uvk)
self.postAssembly = True
self.updateGlobalSystem()
from scipy.sparse.linalg import cg as solve
N_ini = self.system.shape[0]
N_final = N_ini+self.nConstraints
# print "N_ini ", N_ini
# print "N_final ", N_final
# print "nconstraints ", self.nConstraints
cxyz = []
# xk is a list of arrays. one array per patch
for i,xk in enumerate(xyzk):
# print "==== i "+str(i)+" ==="
x = np.zeros(N_final)
# we must transform xk into one single array
_xk = np.concatenate(xk)
# print "xk ", len(_xk)
# print "Dt ", self.Dt.shape
x[:N_ini] = self.Dt * _xk
# ... update with rhs for some Constraints
x += np.asarray(self.RHS[i])
# ...
# np.savetxt("x"+str(id(xk))+".txt", x)
Cx = solve(self.Mat, x)[0]
cx = Cx[:N_ini]
# np.savetxt("xk"+str(i)+".txt", _xk)
# np.savetxt("x"+str(i)+".txt", x)
# np.savetxt("xa"+str(i)+".txt", x[:81])
# np.savetxt("xb"+str(i)+".txt", x[81:])
# np.savetxt("cx"+str(i)+".txt", cx)
# print ">> x "
# print x
# print "---"
# print "---"
# print cx
# print "---"
cxyz.append(cx)
# from scipy.linalg import det
# print "det = ", det(self.Mat.todense())
# print "---"
# print ">> xk "
# print xk
# print "---"
# print ">> self.Dt "
# print self.Dt
# print "---"
# print self.Mat
return cxyz
def construct(self, xyzk, uvk=None, exportGeometry=True):
cxyz = self.fit(xyzk, uvk=uvk)
U = self.PDE.unknown
geo_ini = self.geometry
if exportGeometry:
geo_f = cad_geometry()
else:
list_nrb = []
for patch_id in range(0, self.geometry.npatchs):
nrb = self.geometry[patch_id]
C = np.zeros(list(nrb.shape)+[3])
for i,cx in enumerate(cxyz):
U.set(cx)
C[...,i] = U.tomatrix(patch_id).reshape(nrb.shape)
if exportGeometry:
srf = cad_nurbs(nrb.knots, C, weights=nrb.weights)
srf.orientation = nrb.orientation
srf.rational = nrb.rational
geo_f.append(srf)
else:
srf = cad_nurbs(nrb.knots, C, weights=nrb.weights)
list_nrb.append(srf)
if exportGeometry:
geo_f.set_internal_faces(geo_ini.internal_faces)
geo_f.set_external_faces(geo_ini.external_faces)
geo_f.set_connectivity(geo_ini.connectivity)
return geo_f
else:
return list_nrb
# ...
def genLineIndices(self, patch_id, face, shift=0, ib=0, ie=None):
if face == 0:
if ie is None:
list_A = self.ID_loc[patch_id][ib:,0+shift]
else:
list_A = self.ID_loc[patch_id][ib:ie,0+shift]
if face == 1:
if ie is None:
list_A = self.ID_loc[patch_id][0+shift,ib:]
else:
list_A = self.ID_loc[patch_id][0+shift,ib:ie]
if face == 2:
if ie is None:
list_A = self.ID_loc[patch_id][ib:,-1-shift]
else:
list_A = self.ID_loc[patch_id][ib:ie,-1-shift]
if face == 3:
if ie is None:
list_A = self.ID_loc[patch_id][-1-shift,ib:]
else:
list_A = self.ID_loc[patch_id][-1-shift,ib:ie]
list_A = list_A - 1 # 0 based index
return list_A
# ...
# ...
def addConstraint(self, patch_id_m, fm, typeC \
, patch_id_s=None, fs=None \
, list_vals=None \
, ib=0, ie=None):
# ... C0 Condition
if typeC == "C0":
nrb_m = self.geometry[patch_id_m] # master
list_Am = self.genLineIndices(patch_id_m, fm, shift=0)
# print "list_Am ", list_Am
list_As = None
if (patch_id_s is not None) and (fs is not None):
nrb_s = self.geometry[patch_id_s] # slave
list_As = self.genLineIndices(patch_id_s, fs, shift=0)
# print "list_As ", list_As
rows, values, rhs = genLineC0Constraint( self.Rd \
, list_Am \
, list_As=list_As \
, list_vals=list_vals \
)
# print "rows ", rows
# print "values ", values
# print "rhs ", rhs
# ...
# ... C1 Condition
if typeC == "C1":
nrb_m = self.geometry[patch_id_m] # master
list_Am = self.genLineIndices(patch_id_m, fm, shift=0, ib=ib, ie=ie)
list_Bm = self.genLineIndices(patch_id_m, fm, shift=1, ib=ib, ie=ie)
cm = computeC1Coef2D(nrb_m, fm)
list_As = None
list_Bs = None
cs = None
if (patch_id_s is not None) and (fs is not None):
nrb_s = self.geometry[patch_id_s] # slave
list_As = self.genLineIndices(patch_id_s, fs, shift=0, ib=ib, ie=ie)
list_Bs = self.genLineIndices(patch_id_s, fs, shift=1, ib=ib, ie=ie)
cs = computeC1Coef2D(nrb_s, fs)
rows, values, rhs = genLineC1Constraint( self.Rd \
, list_Am \
, list_Bm \
, cm \
, list_As=list_As \
, list_Bs=list_Bs \
, cs=cs \
, list_vals=list_vals \
)
# ...
self.nConstraints += len(rows)
return rows, values, rhs
# ...
# ...
def updateGlobalSystem(self):
# ...
for constraint in self.constraints:
patch_id_m = None
fm = None
typeC = "C1"
patch_id_s = None
fs = None
list_vals = []
ib = 0
ie = -1
# print "Constraint ", constraint
for key, value in constraint.items():
# print key
# print value
if key == "patch_id_m":
patch_id_m = int(value)
if key == "face_m":
fm = int(value)
if key == "type":
typeC = str(value)
if key == "patch_id_s":
patch_id_s = int(value)
if key == "face_s":
fs = int(value)
if key == "values":
list_vals = value
if key == "ib":
ib = int(value)
if key == "ie":
ie = int(value)
# print "==============="
# print "patch_id_m ", patch_id_m
# print "patch_id_s ", patch_id_s
# print "fm ", fm
# print "fs ", fs
# print "list_vals ", list_vals
# print "==============="
if len(list_vals) == 0:
list_vals = None
rows, values, rhs = self.addConstraint( patch_id_m, fm \
, typeC \
, patch_id_s=patch_id_s, fs=fs \
, list_vals=list_vals \
, ib=ib \
, ie=ie)
self.ConstIndices += rows
self.ConstVals += values
self.ConstRHSs += rhs
# print "r ", rhs
# ...
# ...
Mat = self.Mat.tocoo()
n,m = Mat.shape
# print "Mat.shape ", self.Mat.shape
allData = list(Mat.data)
allRows = list(Mat.row)
allCols = list(Mat.col)
allRHS = []
for i in range(0,self.Rd):
allRHS.append(list(np.zeros(m)))
# print "allRHS ", allRHS
# print "self.ConstRHSs ", self.ConstRHSs
# print "len ", len(allRHS[0])
# print "self.ConstIndices ", self.ConstIndices
# print "self.ConstVals ", self.ConstVals
# print "self.ConstRHSs ", self.ConstRHSs
nCurrent = n
for indices, data, rhs in zip( self.ConstIndices \
, self.ConstVals \
, self.ConstRHSs):
# print "indices ", indices
# print "data ", data
# print "rhs ", rhs
allData += data
allRows += indices
# allCols += nCurrent*np.ones(len(indices), dtype=np.int) #[nCurrent]*len(indices)
allCols += [nCurrent]*len(indices)
# treatment of the transposed part
allData += data
allCols += indices
# allRows += nCurrent*np.ones(len(indices), dtype=np.int) #[nCurrent]*len(indices)
allRows += [nCurrent]*len(indices)
# treatment of the RHS
# print "rhs ", rhs
_rhs = rhs #[list(L) for L in zip(*rhs)]
# print ">>> _rhs : ",_rhs
# print "len ", len(allRHS[0])
for i in range(0, self.Rd):
# print ">> i ", i
# print "_rhs[i] ", _rhs[i]
allRHS[i].append(_rhs[i])
# print "len ", len(allRHS[0])
nCurrent += 1
n += self.nConstraints
m += self.nConstraints
shp = [n,m]
self.Mat = coo_matrix((allData, (allRows, allCols)), shape=shp)
self.Mat.tocsr()
# print "allRHS ", allRHS
# print "allRHS-x ", allRHS[0]
# print "allRHS-y ", allRHS[1]
# print len(allRHS[0])
# print len(allRHS[1])
self.RHS = [np.asarray(rhs) for rhs in allRHS]
# print "*********"
# print "Final Mat.shape ", self.Mat.shape
# print "Final RHS.shape ", [R.shape for R in self.RHS]
# print "*********"
# from scipy.io import mmwrite
# mmwrite("Mat.mtx", self.Mat)
# np.savetxt("RHS.txt", self.RHS)
# ...
# ...
#-----------------------------------
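# --- Added usage sketch (not part of the original module): how the
# ``constraints`` list documented in ``surfit.__init__`` could be filled for
# a hypothetical two-patch geometry ``geo`` built elsewhere with caid; the
# patch and face ids below are purely illustrative.
def _example_surfit_with_constraint(geo):
    constraints = [{'patch_id_m': 0, 'face_m': 2,
                    'patch_id_s': 1, 'face_s': 0,
                    'type': 'C1'}]
    return surfit(geometry=geo, constraints=constraints, alpha=1., mu=None)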
| mit |
robotenique/mlAlgorithms | supervised/modelEvaluation/ex5.py | 1 | 8702 | import matplotlib.pyplot as plt
from matplotlib.pyplot import show
import scipy.io
import numpy as np
from linearRegCostFunction import linearRegCostFunction
from trainLinearReg import trainLinearReg
from learningCurve import learningCurve
from polyFeatures import polyFeatures
from featureNormalize import featureNormalize
from plotFit import plotFit
from validationCurve import validationCurve
# Machine Learning Online Class
# Exercise 5 | Regularized Linear Regression and Bias-Variance
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# linearRegCostFunction.m
# learningCurve.m
# validationCurve.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
# =========== Part 1: Loading and Visualizing Data =============
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment and plot
# the data.
# Load Training Data
print('Loading and Visualizing Data ...')
# Load from ex5data1:
# You will have X, y, Xval, yval, Xtest, ytest in your environment
data = scipy.io.loadmat('ex5data1.mat')
# m = Number of examples
X = data['X'][:, 0]
y = data['y'][:, 0]
Xval = data['Xval'][:, 0]
yval = data['yval'][:, 0]
Xtest = data['Xtest'][:, 0]
ytest = data['ytest'][:, 0]
m = X.size
# Plot training data
plt.scatter(X, y, marker='x', s=60, edgecolor='r', color='r', lw=1.5)
plt.ylabel('Water flowing out of the dam (y)') # Set the y-axis label
plt.xlabel('Change in water level (x)') # Set the x-axis label
show()
input('Program paused. Press Enter to continue...')
# =========== Part 2: Regularized Linear Regression Cost =============
# You should now implement the cost function for regularized linear
# regression.
theta = np.array([1, 1])
J, _ = linearRegCostFunction(np.column_stack((np.ones(m), X)), y, theta, Lambda=1)
print('Cost at theta = [1 1]: %f \n(this value should be about 303.993192)\n' % J)
input('Program paused. Press Enter to continue...')
# =========== Part 3: Regularized Linear Regression Gradient =============
# You should now implement the gradient for regularized linear
# regression.
theta = np.array([1, 1])
J, grad = linearRegCostFunction(np.column_stack((np.ones(m), X)), y, theta, Lambda=1)
print('Gradient at theta = [1 1]: [%f %f] \n'
'(this value should be about [-15.303016 598.250744])\n' % (grad[0], grad[1]))
input('Program paused. Press Enter to continue...')
# =========== Part 4: Train Linear Regression =============
# Once you have implemented the cost and gradient correctly, the
# trainLinearReg function will use your cost function to train
# regularized linear regression.
#
# Write Up Note: The data is non-linear, so this will not give a great
# fit.
# Train linear regression with Lambda = 0
Lambda = 0
theta = trainLinearReg(np.column_stack((np.ones(m), X)), y, Lambda=Lambda)
# Plot fit over the data
plt.scatter(X, y, marker='x', s=60, edgecolor='r', color='r', lw=1.5)
plt.ylabel('Water flowing out of the dam (y)') # Set the y-axis label
plt.xlabel('Change in water level (x)') # Set the x-axis label
plt.plot(X, np.column_stack((np.ones(m), X)).dot(theta), '--', lw=2.0)
show()
input('Program paused. Press Enter to continue...')
# =========== Part 5: Learning Curve for Linear Regression =============
# Next, you should implement the learningCurve function.
#
# Write Up Note: Since the model is underfitting the data, we expect to
# see a graph with "high bias" -- slide 8 in ML-advice.pdf
#
Lambda = 0
error_train, error_val = learningCurve(np.column_stack((np.ones(m), X)), y,
np.column_stack((np.ones(Xval.shape[0]), Xval)),
yval,
Lambda=Lambda)
plt.figure()
plt.plot(range(1,m + 1), error_train, color='b', lw=2, label='Train')
plt.plot(range(1,m + 1), error_val, color='g', lw=2, label='Cross Validation')
plt.title('Learning Curve for Linear Regression')
plt.legend()
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.xlim(0, 13)
plt.legend(loc='upper right', shadow=True, fontsize='x-large', numpoints=1)
show()
print('Training Examples\tTrain Error\tCross Validation Error')
plt.ylim(0, max(np.concatenate((error_train, error_val))) + 10)
for i in range(m):
print(' \t%d\t\t%f\t%f' % (i, error_train[i], error_val[i]))
input('Program paused. Press Enter to continue...')
# =========== Part 6: Feature Mapping for Polynomial Regression =============
# One solution to this is to use polynomial regression. You should now
# complete polyFeatures to map each example into its powers
p = 8
# Map X onto Polynomial Features and Normalize
X_poly = polyFeatures(X, p)
X_poly, mu, sigma = featureNormalize(X_poly) # Normalize
X_poly = np.column_stack((np.ones(m), X_poly)) # Add Ones
# Map X_poly_test and normalize (using mu and sigma)
X_poly_test = polyFeatures(Xtest, p)
X_poly_test = X_poly_test - mu
X_poly_test = X_poly_test / sigma
X_poly_test = np.column_stack((np.ones(X_poly_test.shape[0]), X_poly_test)) # Add Ones
# Map X_poly_val and normalize (using mu and sigma)
X_poly_val = polyFeatures(Xval, p)
X_poly_val = X_poly_val - mu
X_poly_val = X_poly_val / sigma
X_poly_val = np.column_stack((np.ones(X_poly_val.shape[0]), X_poly_val))  # Add Ones
print('Normalized Training Example 1:')
print(X_poly[0, :])
input('Program paused. Press Enter to continue...')
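# Editor's sketch (hypothetical names): the two feature helpers used above are
# imported from the course code.  polyFeatures maps x to [x, x**2, ..., x**p];
# featureNormalize standardizes each column (ddof=1 below mirrors MATLAB's std
# and is an assumption about the imported helper).
def _poly_features_sketch(x, p):
    return np.column_stack([x ** (j + 1) for j in range(p)])

def _feature_normalize_sketch(X):
    mu = X.mean(axis=0)
    sigma = X.std(axis=0, ddof=1)
    return (X - mu) / sigma, mu, sigma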
# =========== Part 7: Learning Curve for Polynomial Regression =============
# Now, you will get to experiment with polynomial regression with multiple
# values of Lambda. The code below runs polynomial regression with
# Lambda = 0.1. You should try running the code with different values of
# Lambda to see how the fit and learning curve change.
Lambda = 0.1
theta = trainLinearReg(X_poly, y, Lambda, method='BFGS', maxiter=10)
# Plot training data and fit
plt.figure()
plt.scatter(X, y, marker='x', s=60, edgecolor='r', color='r', lw=1.5)
plotFit(min(X), max(X), mu, sigma, theta, p)
plt.xlabel('Change in water level (x)')            # Set the x-axis label
plt.ylabel('Water flowing out of the dam (y)')     # Set the y-axis label
plt.title('Polynomial Regression Fit (Lambda = %f)' % Lambda)
show()
input('Program paused. Press Enter to continue...')
error_train, error_val = learningCurve(X_poly, y, X_poly_val, yval, Lambda)
plt.figure()
plt.plot(range(1, m + 1), error_train, color='b', lw=2, label='Train')
plt.plot(range(1, m + 1), error_val, color='g', lw=2, label='Cross Validation')
plt.title('Polynomial Regression Learning Curve (Lambda = %f)' % Lambda)
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.xlim(0, 13)
plt.ylim(0, max(np.concatenate((error_train, error_val))) + 10)
plt.legend(loc='upper right', shadow=True, fontsize='x-large', numpoints=1)
show()
print('Polynomial Regression (Lambda = %f)\n\n' % Lambda)
print('# Training Examples\tTrain Error\tCross Validation Error')
for i in range(m):
print(' \t%d\t\t%f\t%f' % (i, error_train[i], error_val[i]))
input('Program paused. Press Enter to continue...')
# =========== Part 8: Validation for Selecting Lambda =============
# You will now implement validationCurve to test various values of
# Lambda on a validation set. You will then use this to select the
# "best" Lambda value.
Lambda_vec, error_train, error_val = validationCurve(X_poly, y, X_poly_val, yval)
plt.figure()
plt.plot(Lambda_vec, error_train, color='b', lw=0.5, label='Train')
plt.plot(Lambda_vec, error_val, color='g', lw=0.5, label='Cross Validation')
plt.legend(loc='upper right', shadow=True, fontsize='x-large', numpoints=1)
plt.title('Polynomial Regression Validation Curve')
plt.xlabel('Lambda')
plt.ylabel('Error')
show()
# Calculating the error on a TEST set never used before
# using best_lambda found by validationCurve()
best_lambda = 3.2832832832832834
best_theta = trainLinearReg(X_poly, y, best_lambda)
X_poly_test = polyFeatures(Xtest, p)
X_poly_test = X_poly_test - mu
X_poly_test = X_poly_test / sigma
X_poly_test = np.column_stack((np.ones(X_poly_test.shape[0]), X_poly_test)) # Add Ones
error_test, _ = linearRegCostFunction(X_poly_test, ytest, best_theta, 0)
print(f"Error with best_lambda({best_lambda}) on test set = {error_test}")
print('Lambda\t\tTrain Error\tValidation Error')
for i in range(Lambda_vec.size):
print(' %f\t%f\t%f' % (Lambda_vec[i], error_train[i], error_val[i]))
input('Program paused. Press Enter to continue...')
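# Editor's sketch (hypothetical): validationCurve sweeps a grid of Lambda
# values, trains on the training set with each one, and reports unregularized
# train/validation errors.  The grid below is an assumption -- the value of
# best_lambda above (~3.283) suggests the real helper uses a fine grid such as
# np.linspace(0, 10, 1000).
def _validation_curve_sketch(X, y, Xval, yval):
    lambdas = np.linspace(0, 10, 1000)
    err_train = np.zeros(lambdas.size)
    err_val = np.zeros(lambdas.size)
    for k, lam in enumerate(lambdas):
        theta = _train_linear_reg_sketch(X, y, lam)
        err_train[k] = _linear_reg_cost_sketch(X, y, theta, 0)[0]
        err_val[k] = _linear_reg_cost_sketch(Xval, yval, theta, 0)[0]
    return lambdas, err_train, err_val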
| unlicense |
CallaJun/hackprince | indico/skimage/viewer/canvastools/painttool.py | 5 | 6429 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
LABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold',
'greenyellow', 'blueviolet'])
from ...viewer.canvastools.base import CanvasToolBase
__all__ = ['PaintTool']
class PaintTool(CanvasToolBase):
"""Widget for painting on top of a plot.
Parameters
----------
viewer : :class:`skimage.viewer.Viewer`
Skimage viewer object.
overlay_shape : shape tuple
2D shape tuple used to initialize overlay image.
alpha : float (between [0, 1])
Opacity of overlay
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
overlay : array
Overlay of painted labels displayed on top of image.
label : int
Current paint color.
"""
def __init__(self, viewer, overlay_shape, radius=5, alpha=0.3,
on_move=None, on_release=None, on_enter=None,
rect_props=None):
super(PaintTool, self).__init__(viewer, on_move=on_move,
on_enter=on_enter,
on_release=on_release)
props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True)
props.update(rect_props if rect_props is not None else {})
self.alpha = alpha
self.cmap = LABELS_CMAP
self._overlay_plot = None
self.shape = overlay_shape
self._cursor = plt.Rectangle((0, 0), 0, 0, **props)
self._cursor.set_visible(False)
self.ax.add_patch(self._cursor)
# `label` and `radius` can only be set after initializing `_cursor`
self.label = 1
self.radius = radius
# Note that the order is important: Redraw cursor *after* overlay
self.artists = [self._overlay_plot, self._cursor]
viewer.add_tool(self)
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if value >= self.cmap.N:
            raise ValueError('Maximum label value = %s' % (self.cmap.N - 1))
self._label = value
self._cursor.set_edgecolor(self.cmap(value))
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, r):
self._radius = r
self._width = 2 * r + 1
self._cursor.set_width(self._width)
self._cursor.set_height(self._width)
self.window = CenteredWindow(r, self._shape)
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
if image is None:
self.ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
props = dict(cmap=self.cmap, alpha=self.alpha,
norm=mcolors.NoNorm(), animated=True)
self._overlay_plot = self.ax.imshow(image, **props)
else:
self._overlay_plot.set_data(image)
self.redraw()
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
self._shape = shape
        if self._overlay_plot is not None:
self._overlay_plot.set_extent((-0.5, shape[1] + 0.5,
shape[0] + 0.5, -0.5))
self.radius = self._radius
self.overlay = np.zeros(shape, dtype='uint8')
def on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.redraw()
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self.update_cursor(event.xdata, event.ydata)
self.update_overlay(event.xdata, event.ydata)
def on_mouse_release(self, event):
if event.button != 1:
return
self.callback_on_release(self.geometry)
def on_move(self, event):
if not self.viewer.ax.in_axes(event):
self._cursor.set_visible(False)
self.redraw() # make sure cursor is not visible
return
self._cursor.set_visible(True)
self.update_cursor(event.xdata, event.ydata)
if event.button != 1:
self.redraw() # update cursor position
return
self.update_overlay(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update_overlay(self, x, y):
overlay = self.overlay
overlay[self.window.at(y, x)] = self.label
# Note that overlay calls `redraw`
self.overlay = overlay
def update_cursor(self, x, y):
x = x - self.radius - 1
y = y - self.radius - 1
self._cursor.set_xy((x, y))
@property
def geometry(self):
return self.overlay
class CenteredWindow(object):
    """Window that creates slices of numpy arrays over 2D windows.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> w = CenteredWindow(1, a.shape)
>>> a[w.at(1, 1)]
array([[ 0, 1, 2],
[ 4, 5, 6],
[ 8, 9, 10]])
>>> a[w.at(0, 0)]
array([[0, 1],
[4, 5]])
>>> a[w.at(4, 3)]
array([[14, 15]])
"""
def __init__(self, radius, array_shape):
self.radius = radius
self.array_shape = array_shape
def at(self, row, col):
h, w = self.array_shape
r = self.radius
xmin = max(0, col - r)
xmax = min(w, col + r + 1)
ymin = max(0, row - r)
ymax = min(h, row + r + 1)
return [slice(ymin, ymax), slice(xmin, xmax)]
if __name__ == '__main__': # pragma: no cover
np.testing.rundocs()
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
paint_tool = PaintTool(viewer, image.shape)
viewer.show()
| lgpl-3.0 |
0x0all/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
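# The three helpers below trace the upper branch of each penalty's unit
# contour in the (w0, w1) plane: l1 corresponds to |w0| + |w1| = 1, l2 to
# w0**2 + w1**2 = 1, and el solves the elastic-net contour (as labelled in the
# plot) for w1 given w0 and the mixing parameter z -- hence the long closed
# form.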
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
ZhiangChen/soft_arm | src/training3_sim.py | 1 | 13853 | #!/usr/bin/env python
"""
Training on simulation
"""
from ddpg import *
import rospy
import numpy as np
from geometry_msgs.msg import PoseArray as PA
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import PoseStamped as PS
from soft_arm.srv import *
from sensor_msgs.msg import PointCloud as PC
from geometry_msgs.msg import Point
import pickle
from simulator import Sim
import matplotlib.pyplot as plt
MAX_EPISODES = 2000
MAX_EP_STEPS = 200
X_OFFSET = 0.0
Y_OFFSET = 0.0
Z_OFFSET = 0.0
S_DIM = 3
A_DIM = 3
A_BOUND = 10.0
GOT_GOAL = -0.05
TRAIN_POINT = 100000
VAR_DECAY = 0.999998
VAR_INIT = 1.0
GAMMA = 0.98
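# Note: the policy network outputs actions in [-1, 1]; they are mapped to
# physical commands below via action * A_BOUND + A_BOUND, i.e. into
# [0, 2 * A_BOUND].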
class Trainer(object):
def __init__(self):
""" Initializing DDPG """
self.sim = Sim()
self.ddpg = DDPG(a_dim=A_DIM, s_dim=S_DIM, batch_size=10, memory_capacity=100000, gamma=GAMMA) #gamma=0.98
self.ep_reward = 0.0
self.current_ep = 0
self.current_step = 0
self.current_action = np.array([.0, .0, .0])
self.done = True # if the episode is done
self.var = VAR_INIT
self.reward_record = list()
self.ep_record = list()
self.fig = plt.gcf()
self.fig.show()
self.fig.canvas.draw()
print("Initialized DDPG")
""" Setting communication"""
self.pc = PC()
self.pc.header.frame_id = 'world'
self.pub = rospy.Publisher('normalized_state', PC, queue_size=10)
self.pub1 = rospy.Publisher('state', PC, queue_size=10)
"""
self.sub = rospy.Subscriber('robot_pose', PA, self.callback, queue_size=1)
self.pub = rospy.Publisher('normalized_state', PC, queue_size=10)
rospy.wait_for_service('airpress_control', timeout=5)
self.target_PS = PS()
self.action_V3 = Vector3()
self.state = PA()
self.updated = False # if s is updated
self.got_callback1 = False
self.got_callback2 = False
print("Initialized communication")
"""
""" Reading targets """
""" The data should be w.r.t origin by base position """
self.ends = pickle.load(open('./data/ends.p', 'rb'))
self.x_offset = X_OFFSET
self.y_offset = Y_OFFSET
self.z_offset = Z_OFFSET
self.scaler = 1/0.3
self.sample_target()
print("Read target data")
#self.ddpg.restore_momery()
#self.ddpg.restore_model()
while not (rospy.is_shutdown()):
if self.current_ep < MAX_EPISODES:
if self.current_step < MAX_EP_STEPS:
#rospy.sleep(0.5)
p = self.sim.current_pose
s = np.vstack((p, self.target))
s = s[3:,:]
s = self.normalize_state(s)
norm_a = self.ddpg.choose_action(s.reshape(6)[-S_DIM:])
noise_a = np.random.normal(norm_a, self.var)
action = np.clip(noise_a, -1.0, 1.0)
self.current_action = action*A_BOUND + A_BOUND
p_ = self.sim.update_pose(self.current_action)
s_ = np.vstack((p_, self.target))
s_ = s_[3:,:]
s_ = self.normalize_state(s_)
                    self.compute_reward(s_[0,:], s_[1,:])
self.current_step += 1
if self.reward > GOT_GOAL:
self.reward += 1.0
self.ep_reward += self.reward
if self.current_ep% 10 ==0:
self.reward_record.append(self.ep_reward / self.current_step)
self.ep_record.append(self.current_ep)
plt.plot(self.ep_record, self.reward_record)
plt.ylim([-1.2,0.5])
self.fig.canvas.draw()
self.fig.savefig('learning.png')
print('\n')
print "Target Reached"
print("Normalized action:")
print norm_a
print("Noise action:")
print action
print("Output action:")
print self.current_action
print("Reward: %f" % self.reward)
print('Episode:', self.current_ep, ' Reward: %f' % self.ep_reward, 'Explore: %.3f' % self.var,)
print('*' * 40)
self.ddpg.save_model()
self.ddpg.save_memory()
self.done = True
self.current_step = 0
self.current_ep += 1
self.sample_target()
self.ep_reward = 0
#self.ddpg.save_memory()
#self.ddpg.save_model()
"""
self.current_action = np.array([.0, .0, .0])
self.action_V3.x, self.action_V3.y, self.action_V3.z \
= self.current_action[0], self.current_action[1], self.current_action[2]
self.run_action(self.action_V3)
"""
else:
self.ep_reward += self.reward
if self.current_step == MAX_EP_STEPS:
if self.current_ep % 10 ==0:
self.reward_record.append(self.ep_reward / self.current_step)
self.ep_record.append(self.current_ep)
plt.plot(self.ep_record, self.reward_record)
plt.ylim([-1.2, 0.5])
self.fig.canvas.draw()
self.fig.savefig('learning.png')
print('\n')
print "Target Failed"
print("Normalized action:")
print norm_a
print("Noise action:")
print action
print("Output action:")
print self.current_action
print("Reward: %f" % self.reward)
print('Episode:', self.current_ep, ' Reward: %f' % self.ep_reward, 'Explore: %.3f' % self.var,)
print('*' * 40)
self.ddpg.save_model()
self.ddpg.save_memory()
self.done = False
self.current_step = 0
self.current_ep += 1
self.sample_target()
self.ep_reward = 0
#self.ddpg.save_memory()
#self.ddpg.save_model()
"""
self.current_action = np.array([.0, .0, .0])
self.action_V3.x, self.action_V3.y, self.action_V3.z \
= self.current_action[0], self.current_action[1], self.current_action[2]
self.run_action(self.action_V3)
"""
self.ddpg.store_transition(s.reshape(6)[-S_DIM:], action, self.reward, s_.reshape(6)[-S_DIM:])
if self.ddpg.pointer > TRAIN_POINT:
#if (self.current_step % 10 == 0):
#self.var *= VAR_DECAY
#self.var = max(0.0,self.var-1.02/(MAX_EP_STEPS*MAX_EPISODES))
self.ddpg.learn()
self.pub_state(s_)
else:
p = self.sim.current_pose
s = np.vstack((p, self.target))
s = s[3:,:]
s = self.normalize_state(s)
norm_a = self.ddpg.choose_action(s.reshape(6)[-S_DIM:])
self.current_action = norm_a * A_BOUND + A_BOUND
print("Normalized action:")
print norm_a
print("Current action:")
print self.current_action
p_ = self.sim.update_pose(self.current_action)
s_ = np.vstack((p_, self.target))
s_ = s_[3:,:]
print("Distance: %f" % np.linalg.norm(s_[0,:]-s_[1,:]))
s_ = self.normalize_state(s_)
self.compute_reward(s_[0, :], s_[1, :])
print('Explore: %.2f' % self.var,)
print("Reward: %f" % self.reward)
rospy.sleep(1)
#print
print self.ddpg.get_value(s.reshape(6)[-S_DIM:],norm_a,self.reward.reshape((-1,1)),s_.reshape(6)[-S_DIM:])
print '\n'
self.pub_state(s_)
if self.reward > GOT_GOAL:
self.done = True
self.current_step = 0
self.current_ep += 1
self.sample_target()
print "Target Reached"
print("Episode %i Ended" % self.current_ep)
print("Reward: %f" % self.reward)
print('Episode:', self.current_ep, ' Reward: %d' % self.ep_reward, 'Explore: %.2f' % self.var,)
print('*' * 40)
self.ep_reward = 0
# self.ddpg.save_memory()
# self.ddpg.save_model()
"""
self.current_action = np.array([.0, .0, .0])
self.action_V3.x, self.action_V3.y, self.action_V3.z \
= self.current_action[0], self.current_action[1], self.current_action[2]
self.run_action(self.action_V3)
"""
else:
self.done = False
self.current_step += 1
if self.current_step == 2:
print "Target Failed"
print("Reward: %f" % self.reward)
print("Episode %i Ends" % self.current_ep)
print(
'Episode:', self.current_ep, ' Reward: %d' % self.ep_reward, 'Explore: %.2f' % self.var,)
print('*' * 40)
self.current_step = 0
self.current_ep += 1
self.sample_target()
self.ep_reward = 0
# self.ddpg.save_memory()
# self.ddpg.save_model()
"""
self.current_action = np.array([.0, .0, .0])
self.action_V3.x, self.action_V3.y, self.action_V3.z \
= self.current_action[0], self.current_action[1], self.current_action[2]
self.run_action(self.action_V3)
"""
"""
def callback(self, pa):
n_px = (np.array([pa.poses[i].position.x for i in range(4)]) - self.x_offset)*self.scaler
n_py = (np.array([pa.poses[i].position.y for i in range(4)]) - self.y_offset)*self.scaler
n_pz = (np.array([pa.poses[i].position.z for i in range(4)]) - self.z_offset)*self.scaler
self.end = np.array((n_px[3], n_py[3], n_pz[3]))
self.n_t = (self.target - np.array([0,0,Z_OFFSET]))*self.scaler
self.s = np.concatenate((n_px, n_py, n_pz, self.n_t))
self.pub_state(n_px, n_py, n_pz, self.n_t)
self.updated = True
"""
def sample_target(self):
self.target = self.ends[np.random.randint(self.ends.shape[0])]
"""
def run_action(self,control):
try:
client = rospy.ServiceProxy('airpress_control', OneSeg)
resp = client(control)
return resp.status
except rospy.ServiceException, e:
print "Service call failed: %s"%e
"""
def compute_reward(self,end,target):
error = target - end
self.reward = -np.linalg.norm(error)
#print np.linalg.norm(error)
"""
def pub_state(self, n_px, n_py, n_pz, n_t):
pts = list()
for i in range(4):
pt = Point()
pt.x = n_px[i]
pt.y = n_py[i]
pt.z = n_pz[i]
pts.append(pt)
pt = Point()
pt.x, pt.y, pt.z = n_t[0], n_t[1], n_t[2]
pts.append(pt)
self.pc.points = pts
self.pub.publish(self.pc)
"""
def pub_state(self, state):
pts = list()
for i in range(state.shape[0]):
pt = Point()
pt.x = state[i,0]
pt.y = state[i,1]
pt.z = state[i,2]
pts.append(pt)
self.pc.points = pts
self.pub.publish(self.pc)
pts = list()
for i in range(state.shape[0]):
pt = Point()
pt.x = state[i, 0] / 10.0
pt.y = state[i, 1] / 10.0
pt.z = state[i, 2] / 40.0 + 0.4
pts.append(pt)
self.pc.points = pts
self.pub1.publish(self.pc)
def normalize_state(self,state):
offset = np.array([0,0,0.4])
scaler = np.array([10,10,40])
s = state - offset
s = np.multiply(s, scaler)
return s
def calculate_dist(self, state):
offset = np.array([0, 0, 0.4])
scaler = np.array([10, 10, 40])
s = np.multiply(state,1.0/scaler)
s += offset
return np.linalg.norm(s)
if __name__ == '__main__':
rospy.init_node('trainer',anonymous=True)
trainer = Trainer()
print("Shutting down ROS node trainer") | mit |
1kastner/analyse_weather_data | plot_weather_data/plot_husconet_temperatures.py | 1 | 2033 | """
Depends on filter_weather_data.filters.preparation.average_husconet_temperature
"""
import logging
import itertools
from matplotlib import pyplot
from matplotlib import dates as mdates
from gather_weather_data.husconet import HUSCONET_STATIONS
from gather_weather_data.husconet import OFFICIAL_HUSCONET_NAME
from gather_weather_data.husconet import load_husconet_station
from . import style_year_2016_plot
def plot_stations():
"""
Plots all HUSCONET weather stations
"""
fig = pyplot.figure()
fig.canvas.set_window_title("husconet year 2016")
pyplot.rcParams['savefig.dpi'] = 300
temperatures = []
for station_name in HUSCONET_STATIONS:
station_df = load_husconet_station(station_name, "2016-05-04T00:00", "2016-05-05T12:01", "temperature")
logging.debug("plotting {station} from {start} to {end}"
.format(station=station_name, start=station_df.index.min(), end=station_df.index.max()))
official_station_name = OFFICIAL_HUSCONET_NAME[station_name]
pyplot.plot(station_df.index, station_df.temperature, label=official_station_name, linewidth=1)
temperatures.append(station_df.temperature)
logging.debug("start plotting")
ax = pyplot.gca()
style_year_2016_plot(ax)
ax.xaxis.set_major_locator(mdates.HourLocator(byhour=[0, 6, 12, 18]))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%d.%m %H:%M"))
fig.autofmt_xdate(bottom=0.2, rotation=50, ha='right')
ax.set_xlabel('')
leg = pyplot.legend()
for line in leg.get_lines():
line.set_linewidth(1) # .4 is too small
pyplot.show()
max_temperature_diff = 0
for t1, t2 in itertools.combinations(temperatures, 2):
_max_temperature_diff = (t1 - t2).max()
if _max_temperature_diff > max_temperature_diff:
max_temperature_diff = _max_temperature_diff
print("max temperature diff", max_temperature_diff)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
plot_stations()
| agpl-3.0 |
devanshdalal/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 19 | 40613 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors", Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Return True on the 10th iteration to stop fitting early."""
if i == 9:
return True
else:
return False
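# Returning True from a ``monitor`` callback makes ``fit`` stop adding stages,
# which is why the test below expects exactly 10 fitted estimators.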
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_split=0.1).fit(X, y)
for tree in est.estimators_.flat:
assert_equal(tree.min_impurity_split, 0.1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(
np.array(sparse.staged_decision_function(X_sparse)),
np.array(sparse.staged_decision_function(X)))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert_gui/shell/ertshell.py | 1 | 4391 | import atexit
from cmd import Cmd
import readline
import os
from ert.enkf import EnKFMain
from ert_gui.shell.custom_kw_keys import CustomKWKeys
from ert_gui.shell.debug import Debug
from ert_gui.shell.cases import Cases
from ert_gui.shell.gen_data_keys import GenDataKeys
from ert_gui.shell.gen_kw_keys import GenKWKeys
from ert_gui.shell.results import Results
from ert_gui.shell.plugins import Plugins
from ert_gui.shell.simulations import Simulations
from ert_gui.shell.summary_keys import SummaryKeys
from ert_gui.shell.workflows import Workflows
from ert_gui.shell import extractFullArgument, getPossibleFilenameCompletions, PlotSettings, ShellContext
import matplotlib
class ErtShell(Cmd):
prompt = "--> "
intro = " :::::::::::::::::::::::::::::::::::::\n" \
" :: ::\n" \
" :: ______ ______ _______ ::\n" \
" :: | ____| | __ \ |__ __| ::\n" \
" :: | |__ | |__) | | | ::\n" \
" :: | __| | _ / | | ::\n" \
" :: | |____ | | \ \ | | ::\n" \
" :: |______| |_| \_\ |_| ::\n" \
" :: ::\n" \
" :: Ensemble based Reservoir Tool ::\n" \
" :::::::::::::::::::::::::::::::::::::\n" \
"\n" \
"Interactive shell for working with ERT.\n" \
"\n" \
"-- Type help for a list of supported commands.\n" \
"-- Type exit or press Ctrl+D to end the shell session.\n" \
"-- Press Tab for auto completion.\n" \
"-- Arrow up/down for history.\n"
def __init__(self):
Cmd.__init__(self)
shell_context = ShellContext(self)
self.__shell_context = shell_context
self.__history_file = os.path.join(os.path.expanduser("~/.ertshell/ertshell.history"))
self.__init_history()
matplotlib.rcParams["backend"] = "Qt4Agg"
matplotlib.rcParams["interactive"] = True
matplotlib.rcParams["mathtext.default"] = "regular"
matplotlib.rcParams["verbose.level"] = "helpful"
matplotlib.rcParams["verbose.fileo"] = "sys.stderr"
try:
matplotlib.style.use("ggplot") # available from version 1.4
except AttributeError:
pass
Debug(shell_context)
PlotSettings(shell_context)
Workflows(shell_context)
Cases(shell_context)
Plugins(shell_context)
SummaryKeys(shell_context)
GenDataKeys(shell_context)
GenKWKeys(shell_context)
Results(shell_context)
Simulations(shell_context)
CustomKWKeys(shell_context)
def __init_history(self):
try:
readline.read_history_file(self.__history_file)
except IOError:
pass
atexit.register(self.__save_history)
def __save_history(self):
if not os.path.exists(os.path.dirname(self.__history_file)):
os.makedirs(os.path.dirname(self.__history_file))
readline.write_history_file(self.__history_file)
def emptyline(self):
pass
def do_load_config(self, config_file):
if os.path.exists(config_file) and os.path.isfile(config_file):
self.shellContext().setErt(EnKFMain(config_file))
else:
print("Error: Config file '%s' not found!\n" % config_file)
def complete_load_config(self, text, line, begidx, endidx):
argument = extractFullArgument(line, endidx)
return getPossibleFilenameCompletions(argument)
def help_load_config(self):
print("\n".join(("load_config config_file",
" Loads a config file.")))
def do_cwd(self, line):
cwd = os.getcwd()
print("Current directory: %s" % cwd)
def help_cwd(self):
print("Show the current directory.")
def do_exit(self, line):
if self.shellContext().ert() is not None:
self.shellContext().ert().free()
return True
def help_exit(self):
return "\n".join(("exit",
" End the shell session.")),
do_EOF = do_exit
def help_EOF(self):
return "\n".join(("EOF",
" The same as exit. (Ctrl+D)")),
def shellContext(self):
return self.__shell_context | gpl-3.0 |
farr/LIGOHamlet | combine.py | 1 | 1707 | #!/usr/bin/env python
import argparse
import bz2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
import plotutils.plotutils as pu
import post_process as pp
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default='.', help='directory to scan for output subdirs')
args = parser.parse_args()
log_bfs = []
plt.figure(1)
plt.figure(2)
for subdir in glob.glob(op.join(args.dir, '*')):
if op.isdir(subdir):
try:
inp = bz2.BZ2File(op.join(subdir, 'tchain.npy.bz2'), 'r')
try:
tchain = np.load(inp)
finally:
inp.close()
plt.figure(1)
pu.plot_histogram_posterior(tchain[:,:,0].flatten(), normed=True, histtype='step')
plt.figure(2)
pu.plot_histogram_posterior(tchain[:,:,1].flatten(), normed=True, histtype='step')
log_bfs.append(pp.log_odds_signal(tchain))
except:
print 'Couldn\'t process ', subdir
continue
plt.figure(1)
plt.xlabel(r'$R_f$')
plt.ylabel(r'$p\left( R_f \right)$')
plt.savefig(op.join(args.dir, 'fore.pdf'))
plt.savefig(op.join(args.dir, 'fore.png'))
plt.figure(2)
plt.xlabel(r'$R_b$')
plt.ylabel(r'$p\left( R_b \right)$')
plt.savefig(op.join(args.dir, 'back.pdf'))
plt.savefig(op.join(args.dir, 'back.png'))
plt.figure()
plt.plot(log_bfs, '*k')
plt.axhline(15.06) # 5-sigma
plt.ylabel(r'$\ln B_{s,n}$')
plt.savefig(op.join(args.dir, 'bf.pdf'))
plt.savefig(op.join(args.dir, 'bf.png'))
| gpl-3.0 |
jameshensman/GPy | GPy/plotting/matplot_dep/visualize.py | 7 | 23219 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import GPy
import numpy as np
import matplotlib as mpl
import time
from GPy.core.parameterization.variational import VariationalPosterior
try:
import visual
visual_available = True
except ImportError:
visual_available = False
class data_show:
"""
The data_show class is a base class which describes how to visualize a
particular data set. For example, motion capture data can be plotted as a
stick figure, or images are shown using imshow. This class enables latent
to data visualizations for the GP-LVM.
"""
def __init__(self, vals):
self.vals = vals.copy()
# If no axes are defined, create some.
def modify(self, vals):
raise NotImplementedError("this needs to be implemented to use the data_show class")
def close(self):
raise NotImplementedError("this needs to be implemented to use the data_show class")
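# Illustrative sketch of the interface described above: a minimal data_show subclass that just
# prints the values it is asked to display. The class name _print_show is hypothetical and is
# not used elsewhere in this module; it only documents the modify()/close() contract that the
# latent-variable visualizers below rely on.
class _print_show(data_show):
    """Minimal data_show subclass that prints the modelled output instead of drawing it."""
    def modify(self, vals):
        # Store a copy, as the plotting subclasses do, then simply print it.
        self.vals = vals.copy()
        print(self.vals)
    def close(self):
        # Nothing to clean up for a print-based viewer.
        pass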
class vpython_show(data_show):
"""
the vpython_show class is a base class for all visualization methods that use vpython to display. It is initialized with a scene. If the scene is set to None it creates a scene window.
"""
def __init__(self, vals, scene=None):
data_show.__init__(self, vals)
# If no axes are defined, create some.
if scene==None:
self.scene = visual.display(title='Data Visualization')
else:
self.scene = scene
def close(self):
self.scene.exit()
class matplotlib_show(data_show):
"""
the matplotlib_show class is a base class for all visualization methods that use matplotlib. It is initialized with an axis. If the axis is set to None it creates a figure window.
"""
def __init__(self, vals, axes=None):
data_show.__init__(self, vals)
# If no axes are defined, create some.
if axes==None:
fig = plt.figure()
self.axes = fig.add_subplot(111)
else:
self.axes = axes
def close(self):
plt.close(self.axes.get_figure())
class vector_show(matplotlib_show):
"""
A base visualization class that just shows a data vector as a plot of
vector elements alongside their indices.
"""
def __init__(self, vals, axes=None):
matplotlib_show.__init__(self, vals, axes)
#assert vals.ndim == 2, "Please give a vector in [n x 1] to plot"
#assert vals.shape[1] == 1, "only showing a vector in one dimension"
self.size = vals.size
self.handle = self.axes.plot(np.arange(0, vals.size)[:, None], vals)[0]
def modify(self, vals):
self.vals = vals.copy()
xdata, ydata = self.handle.get_data()
assert vals.size == self.size, "values passed into modify changed size! vals.size:{} != in.size:{}".format(vals.size, self.size)
self.handle.set_data(xdata, self.vals)
self.axes.figure.canvas.draw()
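# Illustrative usage sketch for vector_show with a toy [n x 1] output vector. The helper name
# _demo_vector_show is hypothetical and the function is never called by this module; it only
# documents the intended call sequence.
def _demo_vector_show():
    demo_vals = np.sin(np.linspace(0, 2 * np.pi, 50))[:, None]     # toy output vector
    viewer = vector_show(demo_vals)                                 # plot values against index
    viewer.modify(np.cos(np.linspace(0, 2 * np.pi, 50))[:, None])   # redraw with new values
    return viewer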
class lvm(matplotlib_show):
def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1], disable_drag=False):
"""Visualize a latent variable model
:param model: the latent variable model to visualize.
:param data_visualize: the object used to visualize the data which has been modelled.
:type data_visualize: visualize.data_show type.
:param latent_axes: the axes where the latent visualization should be plotted.
"""
if vals is None:
if isinstance(model.X, VariationalPosterior):
vals = model.X.mean.values
else:
vals = model.X.values
if len(vals.shape)==1:
vals = vals[None,:]
matplotlib_show.__init__(self, vals, axes=latent_axes)
if isinstance(latent_axes,mpl.axes.Axes):
self.cid = latent_axes.figure.canvas.mpl_connect('button_press_event', self.on_click)
if not disable_drag:
self.cid = latent_axes.figure.canvas.mpl_connect('motion_notify_event', self.on_move)
self.cid = latent_axes.figure.canvas.mpl_connect('axes_leave_event', self.on_leave)
self.cid = latent_axes.figure.canvas.mpl_connect('axes_enter_event', self.on_enter)
else:
self.cid = latent_axes[0].figure.canvas.mpl_connect('button_press_event', self.on_click)
if not disable_drag:
self.cid = latent_axes[0].figure.canvas.mpl_connect('motion_notify_event', self.on_move)
self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_leave_event', self.on_leave)
self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter)
self.data_visualize = data_visualize
self.model = model
self.latent_axes = latent_axes
self.sense_axes = sense_axes
self.called = False
self.move_on = False
self.latent_index = latent_index
self.latent_dim = model.input_dim
self.disable_drag = disable_drag
# The red cross which shows current latent point.
self.latent_values = vals
self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0]
self.modify(vals)
self.show_sensitivities()
def modify(self, vals):
"""When latent values are modified update the latent representation and ulso update the output visualization."""
self.vals = vals.view(np.ndarray).copy()
y = self.model.predict(self.vals)[0]
self.data_visualize.modify(y)
self.latent_handle.set_data(self.vals[0,self.latent_index[0]], self.vals[0,self.latent_index[1]])
self.axes.figure.canvas.draw()
def on_enter(self,event):
pass
def on_leave(self,event):
pass
def on_click(self, event):
if event.inaxes!=self.latent_axes: return
if self.disable_drag:
self.move_on = True
self.called = True
self.on_move(event)
else:
self.move_on = not self.move_on
self.called = True
def on_move(self, event):
if event.inaxes!=self.latent_axes: return
if self.called and self.move_on:
# Call modify code on move
self.latent_values[:, self.latent_index[0]]=event.xdata
self.latent_values[:, self.latent_index[1]]=event.ydata
self.modify(self.latent_values)
def show_sensitivities(self):
# A click in the bar chart axis for selection a dimension.
if self.sense_axes != None:
self.sense_axes.cla()
self.sense_axes.bar(np.arange(self.model.input_dim), self.model.input_sensitivity(), color='b')
if self.latent_index[1] == self.latent_index[0]:
self.sense_axes.bar(np.array(self.latent_index[0]), self.model.input_sensitivity()[self.latent_index[0]], color='y')
self.sense_axes.bar(np.array(self.latent_index[1]), self.model.input_sensitivity()[self.latent_index[1]], color='y')
else:
self.sense_axes.bar(np.array(self.latent_index[0]), self.model.input_sensitivity()[self.latent_index[0]], color='g')
self.sense_axes.bar(np.array(self.latent_index[1]), self.model.input_sensitivity()[self.latent_index[1]], color='r')
self.sense_axes.figure.canvas.draw()
class lvm_subplots(lvm):
"""
latent_axes is a np array of dimension np.ceil(input_dim/2),
one for each pair of the latent dimensions.
"""
def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None):
self.nplots = int(np.ceil(Model.input_dim/2.))+1
assert len(latent_axes)==self.nplots
if vals==None:
vals = Model.X[0, :]
self.latent_values = vals
for i, axis in enumerate(latent_axes):
if i == self.nplots-1:
if self.nplots*2!=Model.input_dim:
latent_index = [i*2, i*2]
                    lvm.__init__(self, self.latent_values, Model, data_visualize, axis, sense_axes, latent_index=latent_index)
else:
latent_index = [i*2, i*2+1]
                lvm.__init__(self, self.latent_values, Model, data_visualize, axis, latent_index=latent_index)
class lvm_dimselect(lvm):
"""
A visualizer for latent variable models which allows selection of the latent dimensions to use by clicking on a bar chart of their length scales.
For an example of the visualizer's use try:
GPy.examples.dimensionality_reduction.BGPVLM_oil()
"""
def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1], labels=None):
if latent_axes==None and sense_axes==None:
self.fig,(latent_axes,self.sense_axes) = plt.subplots(1,2)
elif sense_axes==None:
fig=plt.figure()
self.sense_axes = fig.add_subplot(111)
else:
self.sense_axes = sense_axes
self.labels = labels
lvm.__init__(self,vals,model,data_visualize,latent_axes,sense_axes,latent_index)
self.show_sensitivities()
print(self.latent_values)
print("use left and right mouse buttons to select dimensions")
def on_click(self, event):
if event.inaxes==self.sense_axes:
new_index = max(0,min(int(np.round(event.xdata-0.5)),self.model.input_dim-1))
if event.button == 1:
# Make it red if and y-axis (red=port=left) if it is a left button click
self.latent_index[1] = new_index
else:
# Make it green and x-axis (green=starboard=right) if it is a right button click
self.latent_index[0] = new_index
self.show_sensitivities()
self.latent_axes.cla()
self.model.plot_latent(which_indices=self.latent_index,
ax=self.latent_axes, labels=self.labels)
self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0]
self.modify(self.latent_values)
elif event.inaxes==self.latent_axes:
self.move_on = not self.move_on
self.called = True
def on_leave(self,event):
print(type(self.latent_values))
latent_values = self.latent_values.copy()
y = self.model.predict(latent_values[None,:])[0]
self.data_visualize.modify(y)
class image_show(matplotlib_show):
"""Show a data vector as an image. This visualizer rehapes the output vector and displays it as an image.
:param vals: the values of the output to display.
:type vals: ndarray
:param axes: the axes to show the output on.
:type vals: axes handle
:param dimensions: the dimensions that the image needs to be transposed to for display.
:type dimensions: tuple
:param transpose: whether to transpose the image before display.
:type bool: default is False.
    :param order: whether array is in Fortran ordering ('F') or C/Python ordering ('C'). Default is 'C'.
:type order: string
:param invert: whether to invert the pixels or not (default False).
:type invert: bool
:param palette: a palette to use for the image.
:param preset_mean: the preset mean of a scaled image.
:type preset_mean: double
:param preset_std: the preset standard deviation of a scaled image.
:type preset_std: double
:param cmap: the colormap for image visualization
:type cmap: matplotlib.cm"""
def __init__(self, vals, axes=None, dimensions=(16,16), transpose=False, order='C', invert=False, scale=False, palette=[], preset_mean=0., preset_std=1., select_image=0, cmap=None):
matplotlib_show.__init__(self, vals, axes)
self.dimensions = dimensions
self.transpose = transpose
self.order = order
self.invert = invert
self.scale = scale
self.palette = palette
self.preset_mean = preset_mean
self.preset_std = preset_std
self.select_image = select_image # This is used when the y vector contains multiple images concatenated.
self.set_image(self.vals)
if not self.palette == []: # Can just show the image (self.set_image() took care of setting the palette)
self.handle = self.axes.imshow(self.vals, interpolation='nearest')
elif cmap==None: # Use a jet map.
self.handle = self.axes.imshow(self.vals, cmap=plt.cm.jet, interpolation='nearest') # @UndefinedVariable
else: # Use the selected map.
self.handle = self.axes.imshow(self.vals, cmap=cmap, interpolation='nearest') # @UndefinedVariable
plt.show()
def modify(self, vals):
self.set_image(vals.copy())
self.handle.set_array(self.vals)
self.axes.figure.canvas.draw()
def set_image(self, vals):
dim = self.dimensions[0] * self.dimensions[1]
num_images = np.sqrt(vals[0,].size/dim)
if num_images > 1 and num_images.is_integer(): # Show a mosaic of images
num_images = np.int(num_images)
self.vals = np.zeros((self.dimensions[0]*num_images, self.dimensions[1]*num_images))
for iR in range(num_images):
for iC in range(num_images):
cur_img_id = iR*num_images + iC
cur_img = np.reshape(vals[0,dim*cur_img_id+np.array(range(dim))], self.dimensions, order=self.order)
first_row = iR*self.dimensions[0]
last_row = (iR+1)*self.dimensions[0]
first_col = iC*self.dimensions[1]
last_col = (iC+1)*self.dimensions[1]
self.vals[first_row:last_row, first_col:last_col] = cur_img
else:
self.vals = np.reshape(vals[0,dim*self.select_image+np.array(range(dim))], self.dimensions, order=self.order)
if self.transpose:
self.vals = self.vals.T
# if not self.scale:
# self.vals = self.vals
if self.invert:
self.vals = -self.vals
# un-normalizing, for visualisation purposes:
self.vals = self.vals*self.preset_std + self.preset_mean
# Clipping the values:
#self.vals[self.vals < 0] = 0
#self.vals[self.vals > 255] = 255
#else:
#self.vals = 255*(self.vals - self.vals.min())/(self.vals.max() - self.vals.min())
if not self.palette == []: # applying using an image palette (e.g. if the image has been quantized)
from PIL import Image
self.vals = Image.fromarray(self.vals.astype('uint8'))
self.vals.putpalette(self.palette) # palette is a list, must be loaded before calling this function
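# Illustrative usage sketch for image_show: display a flattened 16x16 array and then update it
# in place. The helper name _demo_image_show is hypothetical and the function is never called
# by this module.
def _demo_image_show():
    flat_image = np.random.rand(1, 16 * 16)    # one image, flattened to a [1 x 256] row
    viewer = image_show(flat_image, dimensions=(16, 16))
    viewer.modify(np.random.rand(1, 16 * 16))  # push a new frame to the same axes
    return viewer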
class mocap_data_show_vpython(vpython_show):
"""Base class for visualizing motion capture data using visual module."""
def __init__(self, vals, scene=None, connect=None, radius=0.1):
vpython_show.__init__(self, vals, scene)
self.radius = radius
self.connect = connect
self.process_values()
self.draw_edges()
self.draw_vertices()
def draw_vertices(self):
self.spheres = []
for i in range(self.vals.shape[0]):
self.spheres.append(visual.sphere(pos=(self.vals[i, 0], self.vals[i, 2], self.vals[i, 1]), radius=self.radius))
self.scene.visible=True
def draw_edges(self):
self.rods = []
self.line_handle = []
if not self.connect==None:
self.I, self.J = np.nonzero(self.connect)
for i, j in zip(self.I, self.J):
pos, axis = self.pos_axis(i, j)
self.rods.append(visual.cylinder(pos=pos, axis=axis, radius=self.radius))
def modify_vertices(self):
for i in range(self.vals.shape[0]):
self.spheres[i].pos = (self.vals[i, 0], self.vals[i, 2], self.vals[i, 1])
def modify_edges(self):
self.line_handle = []
if not self.connect==None:
self.I, self.J = np.nonzero(self.connect)
for rod, i, j in zip(self.rods, self.I, self.J):
rod.pos, rod.axis = self.pos_axis(i, j)
def pos_axis(self, i, j):
pos = []
axis = []
pos.append(self.vals[i, 0])
axis.append(self.vals[j, 0]-self.vals[i,0])
pos.append(self.vals[i, 2])
axis.append(self.vals[j, 2]-self.vals[i,2])
pos.append(self.vals[i, 1])
axis.append(self.vals[j, 1]-self.vals[i,1])
return pos, axis
def modify(self, vals):
self.vals = vals.copy()
self.process_values()
self.modify_edges()
self.modify_vertices()
def process_values(self):
raise NotImplementedError("this needs to be implemented to use the data_show class")
class mocap_data_show(matplotlib_show):
"""Base class for visualizing motion capture data."""
def __init__(self, vals, axes=None, connect=None, color='b'):
if axes==None:
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d', aspect='equal')
matplotlib_show.__init__(self, vals, axes)
self.color = color
self.connect = connect
self.process_values()
self.initialize_axes()
self.draw_vertices()
self.finalize_axes()
self.draw_edges()
self.axes.figure.canvas.draw()
def draw_vertices(self):
self.points_handle = self.axes.scatter(self.vals[:, 0], self.vals[:, 1], self.vals[:, 2], color=self.color)
def draw_edges(self):
self.line_handle = []
if not self.connect==None:
x = []
y = []
z = []
self.I, self.J = np.nonzero(self.connect)
for i, j in zip(self.I, self.J):
x.append(self.vals[i, 0])
x.append(self.vals[j, 0])
x.append(np.NaN)
y.append(self.vals[i, 1])
y.append(self.vals[j, 1])
y.append(np.NaN)
z.append(self.vals[i, 2])
z.append(self.vals[j, 2])
z.append(np.NaN)
self.line_handle = self.axes.plot(np.array(x), np.array(y), np.array(z), '-', color=self.color)
def modify(self, vals):
self.vals = vals.copy()
self.process_values()
self.initialize_axes_modify()
self.draw_vertices()
self.initialize_axes()
#self.finalize_axes_modify()
self.draw_edges()
self.axes.figure.canvas.draw()
def process_values(self):
raise NotImplementedError("this needs to be implemented to use the data_show class")
def initialize_axes(self, boundary=0.05):
"""Set up the axes with the right limits and scaling."""
bs = [(self.vals[:, i].max()-self.vals[:, i].min())*boundary for i in range(3)]
self.x_lim = np.array([self.vals[:, 0].min()-bs[0], self.vals[:, 0].max()+bs[0]])
self.y_lim = np.array([self.vals[:, 1].min()-bs[1], self.vals[:, 1].max()+bs[1]])
self.z_lim = np.array([self.vals[:, 2].min()-bs[2], self.vals[:, 2].max()+bs[2]])
def initialize_axes_modify(self):
self.points_handle.remove()
self.line_handle[0].remove()
def finalize_axes(self):
# self.axes.set_xlim(self.x_lim)
# self.axes.set_ylim(self.y_lim)
# self.axes.set_zlim(self.z_lim)
# self.axes.auto_scale_xyz([-1., 1.], [-1., 1.], [-1., 1.])
extents = np.array([getattr(self.axes, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.axes, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
# self.axes.set_aspect('equal')
# self.axes.autoscale(enable=False)
def finalize_axes_modify(self):
self.axes.set_xlim(self.x_lim)
self.axes.set_ylim(self.y_lim)
self.axes.set_zlim(self.z_lim)
class stick_show(mocap_data_show):
"""Show a three dimensional point cloud as a figure. Connect elements of the figure together using the matrix connect."""
def __init__(self, vals, connect=None, axes=None):
if len(vals.shape)==1:
vals = vals[None,:]
mocap_data_show.__init__(self, vals, axes=axes, connect=connect)
def process_values(self):
self.vals = self.vals.reshape((3, self.vals.shape[1]/3)).T
class skeleton_show(mocap_data_show):
"""data_show class for visualizing motion capture data encoded as a skeleton with angles."""
def __init__(self, vals, skel, axes=None, padding=0, color='b'):
"""data_show class for visualizing motion capture data encoded as a skeleton with angles.
:param vals: set of modeled angles to use for printing in the axis when it's first created.
:type vals: np.array
:param skel: skeleton object that has the parameters of the motion capture skeleton associated with it.
:type skel: mocap.skeleton object
:param padding:
:type int
"""
self.skel = skel
self.padding = padding
connect = skel.connection_matrix()
mocap_data_show.__init__(self, vals, axes=axes, connect=connect, color=color)
def process_values(self):
"""Takes a set of angles and converts them to the x,y,z coordinates in the internal prepresentation of the class, ready for plotting.
:param vals: the values that are being modelled."""
if self.padding>0:
channels = np.zeros((self.vals.shape[0], self.vals.shape[1]+self.padding))
channels[:, 0:self.vals.shape[0]] = self.vals
else:
channels = self.vals
vals_mat = self.skel.to_xyz(channels.flatten())
self.vals = np.zeros_like(vals_mat)
# Flip the Y and Z axes
self.vals[:, 0] = vals_mat[:, 0].copy()
self.vals[:, 1] = vals_mat[:, 2].copy()
self.vals[:, 2] = vals_mat[:, 1].copy()
def wrap_around(self, lim, connect):
quot = lim[1] - lim[0]
        # np.remainder, np.floor and np.flatnonzero replace the undefined rem/floor/find helpers.
        self.vals = np.remainder(self.vals, quot) + lim[0]
        nVals = np.floor(self.vals/quot)
        for i in range(connect.shape[0]):
            for j in np.flatnonzero(connect[i, :]):
if nVals[i] != nVals[j]:
connect[i, j] = False
return connect
def data_play(Y, visualizer, frame_rate=30):
"""Play a data set using the data_show object given.
:Y: the data set to be visualized.
    :param visualizer: the data_show object used to display each frame of the data.
:type visualizer: data_show
Example usage:
This example loads in the CMU mocap database (http://mocap.cs.cmu.edu) subject number 35 motion number 01. It then plays it using the mocap_show visualize object.
.. code-block:: python
data = GPy.util.datasets.cmu_mocap(subject='35', train_motions=['01'])
Y = data['Y']
Y[:, 0:3] = 0. # Make figure walk in place
visualize = GPy.util.visualize.skeleton_show(Y[0, :], data['skel'])
GPy.util.visualize.data_play(Y, visualize)
"""
for y in Y:
visualizer.modify(y[None, :])
time.sleep(1./float(frame_rate))
| bsd-3-clause |
andaag/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
ajfriend/cvxpy | examples/extensions/kmeans.py | 11 | 3555 | import cvxpy as cvx
import mixed_integer as mi
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show() | gpl-3.0 |
mdeff/ntds_2017 | projects/reports/lastfm_recommendation/helpers.py | 1 | 9955 | import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import pandas as pd
import os.path
import networkx as nx
import copy  # needed by sample_graph_bfs below
def generate_stubs(g):
"""Generates lists of stubs containing `d` stubs for each node, where `d` is the degree of the node."""
stubs_array = np.array([], dtype=np.int32)
# adds num_of_stubs stubs for every node index according to its degree
index_degree_pairs = sorted(list(dict(nx.degree(g)).items()), key=lambda x: x[0])
for ind, num_stubs in index_degree_pairs:
if num_stubs != 0:
stubs_array = np.append(stubs_array, ([ind]*num_stubs))
np.random.shuffle(stubs_array)
return stubs_array
def get_max_pair(pairs):
"""Returns the index-degree pair, corresponding to the element with at most stubs."""
pair = sorted(pairs, key=lambda x: x[1], reverse=True)[0]
if pair[1] == 0:
return None
return pair
def greedy_configuration(g):
"""Generates a random graph with degree distribution as close as possible to the graph passed as
argument to the function."""
stubs = generate_stubs(g)
graph = nx.empty_graph()
pairs = dict(nx.degree(g)) # index-degree pairs
highest = get_max_pair(list(pairs.items()))
# Used to keep up with the number of processed stubs in every moment
total = sum([p[1] for p in list(pairs.items())])/2
processed = 0
while highest != None:
        source = highest[0] # the node that is the source in this iteration
# delete the stubs that correspond to the stubs of the source in order to prevent loops
elem_indices = np.where(stubs == source)
stubs = np.delete(stubs, elem_indices)
# break if you have no stubs to connect to except the ones that create self loops
if len(stubs) == 0:
print("Breaking in advance to prevent self-loops")
print("Only stubs of node %d left" % source)
break
stubs_left = highest[1]
while stubs_left != 0: # loop until you use all of the source stubs
if len(stubs) == 0: # break if no stubs to connect to are left
print("Breaking while processing to prevent self-loops")
print("Only stubs of node %d left" % source)
break
# choose a random stub connect it to the source and remove it from the list of stubs
target_index = np.random.choice(len(stubs))
target = stubs[target_index]
if graph.has_edge(source, target):
elem_indices = np.where(stubs == target)
if len(np.delete(stubs, elem_indices)) == 0:
print("Breaking while processing to prevent self-loops")
print("Only stubs of node %d and node %s left" % (source, target))
pairs[source] = -pairs[source]
break
else:
continue
else:
graph.add_edge(source, target, weight = np.random.rand())
stubs = np.delete(stubs, target_index)
pairs[target] = pairs[target] - 1
pairs[source] = pairs[source] - 1
stubs_left = stubs_left - 1
# Used to keep up with the number of processed stubs in every moment
processed = processed + highest[1] - stubs_left
#print("Processed %d / %d" % (processed, total))
highest = get_max_pair(list(pairs.items()))
return (graph, pairs)
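# Illustrative usage sketch for greedy_configuration: build a degree-preserving random graph
# from a small example network. The helper name _demo_greedy_configuration is hypothetical and
# the function is not called anywhere in this module.
def _demo_greedy_configuration():
    g = nx.karate_club_graph()  # any undirected networkx graph works here
    random_graph, leftover_stubs = greedy_configuration(g)
    print("Random graph has %d nodes and %d edges" % (random_graph.number_of_nodes(),
                                                      random_graph.number_of_edges()))
    return random_graph, leftover_stubs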
def generate_user_user_matrix_from_artist_artist_matrix(user_artist_matrix, artist_artist_matrix):
"""Infers user-user connections based on artist similarity and listening counts contained in the user_artist matrix"""
friend_friend_reconstructed = np.zeros((user_artist_matrix.shape[0],user_artist_matrix.shape[0]))
for i in range(user_artist_matrix.shape[0]):
# select the row containing artists connected to user i
user_artists_weights = user_artist_matrix[i]
# get the indices of artists connected to user i
non_zero_artist_weights_indices = list(np.where(user_artists_weights != 0)[0])
# save the position of the user at user_pos_1
user_pos_1 = i
# loop through all of artists connected to the user at position user_pos_1
for j, artist_pos in enumerate(non_zero_artist_weights_indices):
# save the weight of the connection between user at position user_pos_1 and the artist at position artist_pos
weight_1 = user_artist_matrix[user_pos_1,artist_pos]
# select the column containing the connections to users for artist at position artist_pos
artist_to_users = user_artist_matrix[:,artist_pos]
# get the indices of users connected to artist at artist_pos
non_zero_user_weights_indices = list(np.where(artist_to_users != 0)[0])
non_zero_user_weights_indices.remove(user_pos_1)
# loop through all of the users connected to the artist at artist_pos
for z, user_pos_2 in enumerate(non_zero_user_weights_indices):
# save the weight of the connection between user at user_pos_2 and the artist at position artist_pos
weight_2 = user_artist_matrix[user_pos_2,artist_pos]
# set the strength of the connection to the minimum of the two weights
weight = min(weight_1,weight_2)
# increase the similarity between the users at positions user_pos_1 and user_pos_2 for the strength
# of the path between them
friend_friend_reconstructed[user_pos_1,user_pos_2] = friend_friend_reconstructed[user_pos_1,user_pos_2] \
+ weight
for i in range(user_artist_matrix.shape[0]):
# select the row containing artists connected to user i
user_artists_weights = user_artist_matrix[i]
# get the indices of artists connected to user i
non_zero_artist_weights_indices = list(np.where(user_artists_weights != 0)[0])
# save the position of the user at user_pos_1
user_pos_1 = i
# loop through all of artists connected to the user at position user_pos_1
for j, artist_pos in enumerate(non_zero_artist_weights_indices):
# save the weight of the connection between user at position user_pos_1 and the artist at position artist_pos
weight_1 = user_artist_matrix[user_pos_1, artist_pos]
# get the indices for the artists similar to the artist at artist_pos
similar_artists_indices = np.where(artist_artist_matrix[artist_pos] != 0)[0]
# loop through all the artist similar to the artist at position artist_pos
for w, similar_artist_pos in enumerate(similar_artists_indices):
# save the similarity strength between artist at positions artist_pos and similar_artist_pos
similarity_strength = artist_artist_matrix[artist_pos, similar_artist_pos]
# select the column containing the connections to users for artist at position similar_artist_pos
artist_to_users = user_artist_matrix[:, similar_artist_pos]
# get the indices of users connected to artist at artist_pos
non_zero_user_weights_indices = list(np.where(artist_to_users != 0)[0])
if user_pos_1 in non_zero_user_weights_indices:
continue
users_connected_to_prev = list(np.where(user_artist_matrix[:, artist_pos] != 0)[0])
# loop through all of the users connected to the artist at similar_artist_pos
for z, user_pos_2 in enumerate(non_zero_user_weights_indices):
if user_pos_2 in users_connected_to_prev:
continue
# save the weight of the connection between user at user_pos_2 and the artist at similar_artist_pos
weight_2 = user_artist_matrix[user_pos_2, similar_artist_pos]
# set the strength of the connection to the minimum of the two weights,
# rescaled with the similarity strength between the two artists
weight = min(weight_1,weight_2)*similarity_strength
# increase the similarity between the users at positions user_pos_1 and user_pos_2 for the strength
# of the path between them
friend_friend_reconstructed[user_pos_1, user_pos_2] = friend_friend_reconstructed[user_pos_1,user_pos_2] \
+ weight
return friend_friend_reconstructed
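# Illustrative usage sketch: infer user-user weights from a toy listening-count matrix
# (users x artists) and a toy artist-artist similarity matrix (artists x artists). All numbers
# are made up and _demo_user_user_inference is a hypothetical helper.
def _demo_user_user_inference():
    user_artist = np.array([[1.0, 0.0, 2.0],
                            [0.0, 3.0, 1.0]])
    artist_artist = np.array([[0.0, 0.5, 0.0],
                              [0.5, 0.0, 0.0],
                              [0.0, 0.0, 0.0]])
    return generate_user_user_matrix_from_artist_artist_matrix(user_artist, artist_artist)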
def compare_networks(original, constructed):
"""Compares the two networks in terms of links in the first network that have been detected in the second one"""
detected = 0
not_detected = 0
for node_one, node_two, weight in original.edges(data='weight'):
if constructed.has_edge(node_one, node_two):
detected = detected + 1
else:
not_detected = not_detected + 1
print("The total number of detected links is %d." % detected)
print("The total number of not detected links is %d." % not_detected)
# Used for testing
def sample_graph_bfs(G, sample_size, source_node):
"""A helper used to sample a graph from a source node, containing a desired number of nodes"""
visited = set()
queue = []
queue.append(source_node)
while (len(queue) != 0) and (len(visited) < sample_size):
curr_node = queue.pop(0)
if curr_node in visited:
continue
visited.add(curr_node)
neighbors = G.neighbors(curr_node)
queue = queue + list(neighbors)
return copy.deepcopy(G.subgraph(visited)) | mit |
aburrell/davitpy | davitpy/pydarn/plotting/musicPlot.py | 2 | 89257 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""musicPlot module
A module for plotting objects created and processed with the pydarn.proc.music module.
Notes
-----
Please see the pydarn.proc.music module documentation and the iPython notebooks included in the docs
folder of the DaViTPy distribution.
Module author: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------
daynight_terminator Calculate day/night terminator
plotRelativeRanges cell distances
rangeBeamPlot range versus beam
timeSeriesMultiPlot time series
spectrumMultiPlot 1D line spectral data
multiPlot time series or spectral data
plotFullSpectrum full spectrum of musicArray
plotDlm cross spectral matrix
plotKarr horizontal wave number
plotKarrDetected add in use of detectSignals()
plotKarrAxis Karr plot without titles
--------------------------------------------------
Classes
---------------------------------------
musicFan fan plot of musicArray data
musicRTI RTI plot of musicArray data
---------------------------------------
"""
import numpy as np
import scipy as sp
import datetime
from matplotlib.collections import PolyCollection
from matplotlib.patches import Polygon
from matplotlib import dates as md
import matplotlib
from mpl_toolkits.basemap import Basemap
from davitpy import utils
from davitpy.pydarn.radar.radUtils import getParamDict
from davitpy.pydarn.proc.music import getDataSet
import logging
#Global Figure Size
figsize=(20,10)
def daynight_terminator(date, lons):
"""Calculates the latitude, Greenwich Hour Angle, and solar
declination from a given latitude and longitude.
This routine is used by musicRTI for terminator calculations.
Parameters
----------
date : datetime.datetime
UT date and time of terminator calculation.
lons : np.array
Longitudes of which to calculate the terminator.
Returns
-------
lats : np.array
Latitudes of solar terminator.
tau : np.array
Greenwhich Hour Angle.
dec : np.array
Solar declination.
Notes
-----
Adapted from mpl_toolkits.basemap.solar by Nathaniel A. Frissell, Fall 2013
"""
import mpl_toolkits.basemap.solar as solar
dg2rad = np.pi/180.
# compute greenwich hour angle and solar declination
# from datetime object (assumed UTC).
tau, dec = solar.epem(date)
# compute day/night terminator from hour angle, declination.
longitude = lons + tau
lats = np.arctan(-np.cos(longitude*dg2rad)/np.tan(dec*dg2rad))/dg2rad
return lats,tau,dec
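# Illustrative usage sketch for daynight_terminator: evaluate the terminator latitude at a few
# longitudes for an arbitrary UT time. The helper name _demo_daynight_terminator is
# hypothetical and the function is never called by this module.
def _demo_daynight_terminator():
    sample_time = datetime.datetime(2013, 11, 1, 12, 0)  # arbitrary UT time
    sample_lons = np.arange(-180., 181., 60.)            # longitudes to evaluate [deg]
    lats, tau, dec = daynight_terminator(sample_time, sample_lons)
    return lats, tau, dec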
class musicFan(object):
"""Class to plot a fan plot using a pydarn.proc.music.musicArray object as the data source.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
Which dataSet in the musicArray object to plot
time : Optional[None or datetime.datetime]
Time scan plot. If None, the first time in dataSet will be used.
axis : Optional[None or matplotlib.figure.axis]
Matplotlib axis on which to plot. If None, a new figure and axis will be created.
scale : Optional[None or 2-Element iterable]
Colorbar scale. If None, the default scale for the current SuperDARN parameter will be used.
autoScale : Optional[bool]
If True, automatically scale the color bar for good data visualization. Keyword scale must
be None when using autoScale.
plotZeros : Optional[bool]
If True, plot cells that are exactly 0.
markCell : Optional[None or 2-Element iterable]
Mark the (beam, rangeGate) with black.
markBeam : Optional[None or int]
Mark a chosen beam.
markBeam_dict : Optional[dict]
dictionary of keywords defining markBeam line properties.
plotTerminator : Optional[bool]
If True, overlay day/night terminator on map. Uses Basemap's nightshade.
plot_title : Optional[bool]
If True, plot the title information
title : Optional[str]
Overide default title text.
parallels_ticks : Optional[list]
Where to draw the parallel (latitude) lines
meridians_ticks : Optional[list]
Where to draw the meridian (longitude) lines
zoom : Optional[float]
Multiply the map height and width by this factor (bigger number shows more area).
lat_shift : Optional[float]
Add this number to the computed lat_0 sent to basemap.
lon_shift : Optional[float]
Add this number to the computed lon_0 sent to basemap.
cmap_handling : Optional[str]
'superdarn' to use SuperDARN-style colorbars, 'matplotlib' for direct use of matplotlib's colorbars.
'matplotlib' is recommended when using custom scales and the 'superdarn' mode is not providing a desirable result.
cmap : Optional[one or matplotlib colormap object]
        If None and cmap_handling=='matplotlib', use jet.
plot_cbar : Optional[bool]
If True, plot the color bar.
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
Fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
Fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
Fontsize of "Ground Scatter Only" text
model_text_size : Optional[int]
fontsize of model and coordinate indicator text
draw_coastlines : Optional[bool]
If True, draw the coastlines.
basemap_dict : Optional[dict]
Dictionary of keywords sent to the basemap invocation
**kwArgs
Keyword Arguments
Attributes
----------
map_obj
pcoll
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,dataObject,
dataSet = 'active',
time = None,
axis = None,
scale = None,
autoScale = False,
plotZeros = False,
markCell = None,
markBeam = None,
markBeam_dict = {'color':'white','lw':2},
plotTerminator = True,
parallels_ticks = None,
meridians_ticks = None,
zoom = 1.,
lat_shift = 0.,
lon_shift = 0.,
cmap_handling = 'superdarn',
cmap = None,
plot_cbar = True,
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
model_text_size = 'small',
draw_coastlines = True,
basemap_dict = {},
plot_title = True,
title = None,
**kwArgs):
if axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
from scipy import stats
# Make some variables easier to get to...
currentData = getDataSet(dataObject,dataSet)
metadata = currentData.metadata
latFull = currentData.fov.latFull
lonFull = currentData.fov.lonFull
coords = metadata['coords']
# Translate parameter information from short to long form.
paramDict = getParamDict(metadata['param'])
if paramDict.has_key('label'):
param = paramDict['param']
cbarLabel = paramDict['label']
else:
param = 'width' # Set param = 'width' at this point just to not screw up the colorbar function.
cbarLabel = metadata['param']
# Set colorbar scale if not explicitly defined.
if(scale is None):
if autoScale:
sd = stats.nanstd(np.abs(currentData.data),axis=None)
mean = stats.nanmean(np.abs(currentData.data),axis=None)
scMax = np.ceil(mean + 1.*sd)
if np.min(currentData.data) < 0:
scale = scMax*np.array([-1.,1.])
else:
scale = scMax*np.array([0.,1.])
else:
if paramDict.has_key('range'):
scale = paramDict['range']
else:
scale = [-200,200]
# See if an axis is provided... if not, set one up!
if axis is None:
axis = fig.add_subplot(111)
else:
fig = axis.get_figure()
# Figure out which scan we are going to plot...
if time is None:
timeInx = 0
else:
timeInx = (np.where(currentData.time >= time))[0]
if np.size(timeInx) == 0:
timeInx = -1
else:
timeInx = int(np.min(timeInx))
# do some stuff in map projection coords to get necessary width and height of map
lonFull,latFull = (np.array(lonFull)+360.)%360.,np.array(latFull)
goodLatLon = np.logical_and( np.logical_not(np.isnan(lonFull)), np.logical_not(np.isnan(latFull)) )
goodInx = np.where(goodLatLon)
goodLatFull = latFull[goodInx]
goodLonFull = lonFull[goodInx]
tmpmap = Basemap(projection='npstere', boundinglat=20,lat_0=90, lon_0=np.mean(goodLonFull))
x,y = tmpmap(goodLonFull,goodLatFull)
minx = x.min()
miny = y.min()
maxx = x.max()
maxy = y.max()
width = (maxx-minx)
height = (maxy-miny)
cx = minx + width/2.
cy = miny + height/2.
lon_0,lat_0 = tmpmap(cx, cy, inverse=True)
lon_0 = np.mean(goodLonFull)
dist = width/50.
# Fill the entire subplot area without changing the data aspect ratio.
bbox = axis.get_window_extent()
bbox_width = bbox.width
bbox_height = bbox.height
ax_aspect = bbox_width / bbox_height
map_aspect = width / height
if map_aspect < ax_aspect:
width = (height*bbox_width) / bbox_height
if map_aspect > ax_aspect:
height = (width*bbox_height) / bbox_width
# Zoom!
width = zoom * width
height = zoom * height
lat_0 = lat_0 + lat_shift
lon_0 = lon_0 + lon_shift
bmd = basemap_dict.copy()
width = bmd.pop('width', width)
height = bmd.pop('height', height)
lat_0 = bmd.pop('lat_0', lat_0)
lon_0 = bmd.pop('lon_0', lon_0)
# draw the actual map we want
m = Basemap(projection='stere',width=width,height=height,lon_0=lon_0,lat_0=lat_0,ax=axis,**bmd)
if parallels_ticks is None:
parallels_ticks = np.arange(-80.,81.,10.)
if meridians_ticks is None:
meridians_ticks = np.arange(-180.,181.,20.)
m.drawparallels(parallels_ticks,labels=[1,0,0,0])
m.drawmeridians(meridians_ticks,labels=[0,0,0,1])
if(coords == 'geo') and draw_coastlines == True:
m.drawcoastlines(linewidth=0.5,color='k')
m.drawmapboundary(fill_color='w')
m.fillcontinents(color='w', lake_color='w')
# Plot the SuperDARN data!
ngates = np.shape(currentData.data)[2]
nbeams = np.shape(currentData.data)[1]
verts = []
scan = []
data = currentData.data[timeInx,:,:]
for bm in range(nbeams):
for rg in range(ngates):
if goodLatLon[bm,rg] == False: continue
if np.isnan(data[bm,rg]): continue
if data[bm,rg] == 0 and not plotZeros: continue
scan.append(data[bm,rg])
x1,y1 = m(lonFull[bm+0,rg+0],latFull[bm+0,rg+0])
x2,y2 = m(lonFull[bm+1,rg+0],latFull[bm+1,rg+0])
x3,y3 = m(lonFull[bm+1,rg+1],latFull[bm+1,rg+1])
x4,y4 = m(lonFull[bm+0,rg+1],latFull[bm+0,rg+1])
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if (cmap_handling == 'matplotlib') or autoScale:
if cmap is None:
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
elif cmap_handling == 'superdarn':
colors = 'lasse'
cmap,norm,bounds = utils.plotUtils.genCmap(param,scale,colors=colors)
# pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll = PolyCollection(np.array(verts),edgecolors='face',closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Mark Cell
if markCell is not None:
beamInx = int(np.where(currentData.fov.beams == markCell[0])[0])
gateInx = int(np.where(currentData.fov.gates == markCell[1])[0])
x1,y1 = m(lonFull[beamInx+0,gateInx+0],latFull[beamInx+0,gateInx+0])
x2,y2 = m(lonFull[beamInx+1,gateInx+0],latFull[beamInx+1,gateInx+0])
x3,y3 = m(lonFull[beamInx+1,gateInx+1],latFull[beamInx+1,gateInx+1])
x4,y4 = m(lonFull[beamInx+0,gateInx+1],latFull[beamInx+0,gateInx+1])
mkv = np.array([[x1,y1],[x2,y2],[x3,y3],[x4,y4],[x1,y1]])
poly = Polygon(mkv,facecolor='#000000',edgecolor='none',zorder=100)
axis.add_patch(poly)
# Mark Beam
if markBeam is not None:
beamInx = int(np.where(currentData.fov.beams == markBeam)[0])
startedMarking = False
for gateInx in range(ngates):
if goodLatLon[beamInx,gateInx] == False: continue
x1,y1 = m(lonFull[beamInx+0,gateInx+0],latFull[beamInx+0,gateInx+0])
x2,y2 = m(lonFull[beamInx+1,gateInx+0],latFull[beamInx+1,gateInx+0])
x3,y3 = m(lonFull[beamInx+1,gateInx+1],latFull[beamInx+1,gateInx+1])
x4,y4 = m(lonFull[beamInx+0,gateInx+1],latFull[beamInx+0,gateInx+1])
axis.plot([x1,x4],[y1,y4],zorder=150,**markBeam_dict)
axis.plot([x2,x3],[y2,y3],zorder=150,**markBeam_dict)
if not startedMarking:
axis.plot([x1,x2],[y1,y2],zorder=150,**markBeam_dict)
startedMarking = True
if gateInx == ngates-1:
axis.plot([x3,x4],[y3,y4],zorder=150,**markBeam_dict)
dataName = currentData.history[max(currentData.history.keys())] # Label the plot with the current level of data processing.
if plot_title:
if title is None:
axis.set_title(metadata['name']+' - '+dataName+currentData.time[timeInx].strftime('\n%Y %b %d %H%M UT'))
else:
axis.set_title(title)
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction)
cbar.set_label(cbarLabel)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
txt = 'Coordinates: ' + metadata['coords'] +', Model: ' + metadata['model']
axis.text(1.01, 0, txt,
horizontalalignment='left',
verticalalignment='bottom',
rotation='vertical',
size=model_text_size,
transform=axis.transAxes)
if plotTerminator:
m.nightshade(currentData.time[timeInx])
self.map_obj = m
self.pcoll = pcoll
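# Illustrative usage sketch for musicFan: draw a fan plot from an already-processed musicArray
# object. Building the musicArray itself (reading the radar data, filtering, etc.) is outside
# the scope of this sketch, so dataObj is taken as an argument; _demo_music_fan is a
# hypothetical helper and is never called by this module.
def _demo_music_fan(dataObj):
    fan = musicFan(dataObj, dataSet='active', scale=(-30, 30), plotTerminator=False)
    return fan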
class musicRTI(object):
"""Class to create an RTI plot using a pydarn.proc.music.musicArray object as the data source.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
beam : Optional[int]
Beam number to plot.
    xlim : Optional[None or 2-element iterable of datetime.datetime]
Limits for x-axis.
ylim : Optional[None or 2-element iterable of floats]
Limits for y-axis.
axis : Optional[None or matplotlib.figure.axis]
Matplotlib axis on which to plot. If None, a new figure and axis will be created.
scale : Optional[None or 2-Element iterable]
Colorbar scale. If None, the default scale for the current SuperDARN parameter will be used.
plotZeros : Optional[bool]
If True, plot data cells that are identically zero.
max_sounding_time : Optional[None or datetime.timedelta]
Do not allow data to be plotted for longer than this duration.
xBoundaryLimits: Optional[None or 2-element iterable of datetime.datetime]
Mark a region of times on the RTI plot. A green dashed vertical line will be plotted
at each of the boundary times. The region of time outside of the boundary will be shaded gray.
If set to None, this will automatically be set to the timeLimits set in the metadata, if they exist.
yBoundaryLimits : Optional[None or 2-element iterable of floats]
Mark a region of range on the RTI plot. A green dashed horizontal line will be plotted
at each of the boundary ranges. The region of time outside of the boundary will be shaded gray.
If set to None, this will automatically be set to the gateLimits set in the metadata, if they exist.
yticks : Optional[list]
Where to put the ticks on the y-axis.
ytick_lat_format : Optional[str]
%-style string format code for latitude y-tick labels
autoScale : Optional[bool]
If True, automatically scale the color bar for good data visualization. Keyword scale must be None when using autoScale.
plotTerminator : Optional[bool]
If True, overlay day/night terminator on the RTI plot. Every cell is evaluated for day/night and shaded accordingly. Therefore,
terminator resolution will match the resolution of the RTI plot data.
axvlines : Optional[None or list of datetime.datetime]
Dashed vertical lines will be drawn at each specified datetime.datetime.
axvline_color : Optional[str]
Matplotlib color code specifying color of the axvlines.
secondary_coords : Optional[str]
        Secondary coordinate system for RTI plot y-axis ('lat' or 'range')
plot_info : Optional[bool]
If True, plot frequency/noise plots
plot_title : Optional[bool]
If True, plot the title information
    plot_range_limits_label : Optional[bool]
If True, plot the label corresponding to the range limits on the right-hand y-axis.
cmap_handling : Optional[str]
'superdarn' to use SuperDARN-style colorbars, 'matplotlib' for direct use of matplotlib's colorbars.
'matplotlib' is recommended when using custom scales and the 'superdarn' mode is not providing a desirable result.
plot_cbar : Optional[bool]
If True, plot the color bar.
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
model_text_size : Optional[int]
fontsize of model and coordinate indicator text
**kwArgs :
Keyword Arguments
Attributes
----------
cbar_info : list
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,dataObject,
dataSet = 'active',
beam = 7,
coords = 'gate',
xlim = None,
ylim = None,
axis = None,
scale = None,
plotZeros = False,
max_sounding_time = datetime.timedelta(minutes=4),
xBoundaryLimits = None,
yBoundaryLimits = None,
yticks = None,
ytick_lat_format = '.0f',
autoScale = False,
plotTerminator = True,
axvlines = None,
axvline_color = '0.25',
secondary_coords = 'lat',
plot_info = True,
plot_title = True,
plot_range_limits_label = True,
cmap_handling = 'superdarn',
cmap = None,
bounds = None,
norm = None,
plot_cbar = True,
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
model_text_size = 'small',
y_labelpad = None,
**kwArgs):
from scipy import stats
from rti import plot_freq,plot_nave,plot_skynoise,plot_searchnoise
if axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
# Make some variables easier to get to...
currentData = getDataSet(dataObject,dataSet)
metadata = currentData.metadata
latFull = currentData.fov.latFull
lonFull = currentData.fov.lonFull
latCenter = currentData.fov.latCenter
lonCenter = currentData.fov.lonCenter
time = currentData.time
beamInx = np.where(currentData.fov.beams == beam)[0]
radar_lats = latCenter[beamInx,:]
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
# Calculate terminator. ########################################################
if plotTerminator:
daylight = np.ones([nrTimes,nrGates],np.bool)
for tm_inx in range(nrTimes):
tm = time[tm_inx]
term_lons = lonCenter[beamInx,:]
term_lats,tau,dec = daynight_terminator(tm,term_lons)
if dec > 0: # NH Summer
day_inx = np.where(radar_lats < term_lats)[1]
else:
day_inx = np.where(radar_lats > term_lats)[1]
if day_inx.size != 0:
daylight[tm_inx,day_inx] = False
# Translate parameter information from short to long form.
paramDict = getParamDict(metadata['param'])
if paramDict.has_key('label'):
param = paramDict['param']
cbarLabel = paramDict['label']
else:
param = 'width' # Set param = 'width' at this point just to not screw up the colorbar function.
cbarLabel = metadata['param']
# Set colorbar scale if not explicitly defined.
if(scale is None):
if autoScale:
sd = stats.nanstd(np.abs(currentData.data),axis=None)
mean = stats.nanmean(np.abs(currentData.data),axis=None)
scMax = np.ceil(mean + 1.*sd)
if np.min(currentData.data) < 0:
scale = scMax*np.array([-1.,1.])
else:
scale = scMax*np.array([0.,1.])
else:
if paramDict.has_key('range'):
scale = paramDict['range']
else:
scale = [-200,200]
# See if an axis is provided... if not, set one up!
if axis is None:
axis = fig.add_subplot(111)
else:
fig = axis.get_figure()
if np.size(beamInx) == 0:
beamInx = 0
beam = currentData.fov.beams[0]
# Plot the SuperDARN data!
verts = []
scan = []
data = np.squeeze(currentData.data[:,beamInx,:])
# The coords keyword needs to be tested better. For now, just allow 'gate' only.
# Even in 'gate' mode, the geographic latitudes are plotted along with gate.
# if coords is None and metadata.has_key('coords'):
# coords = metadata['coords']
#
if coords not in ['gate','range']:
logging.warning('Coords "%s" not supported for RTI plots. Using "gate".' % coords)
coords = 'gate'
if coords == 'gate':
rnge = currentData.fov.gates
elif coords == 'range':
rnge = currentData.fov.slantRFull[beam,:]
xvec = [matplotlib.dates.date2num(x) for x in currentData.time]
for tm in range(nrTimes-1):
for rg in range(nrGates-1):
if np.isnan(data[tm,rg]): continue
if data[tm,rg] == 0 and not plotZeros: continue
if max_sounding_time is not None:
if (currentData.time[tm+1] - currentData.time[tm+0]) > max_sounding_time: continue
scan.append(data[tm,rg])
x1,y1 = xvec[tm+0],rnge[rg+0]
x2,y2 = xvec[tm+1],rnge[rg+0]
x3,y3 = xvec[tm+1],rnge[rg+1]
x4,y4 = xvec[tm+0],rnge[rg+1]
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if (cmap_handling == 'matplotlib') or autoScale:
if cmap is None:
cmap = matplotlib.cm.jet
if bounds is None:
bounds = np.linspace(scale[0],scale[1],256)
if norm is None:
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
elif cmap_handling == 'superdarn':
colors = 'lasse'
cmap,norm,bounds = utils.plotUtils.genCmap(param,scale,colors=colors)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Plot the terminator! #########################################################
if plotTerminator:
# print 'Terminator functionality is disabled until further testing is completed.'
term_verts = []
term_scan = []
rnge = currentData.fov.gates
xvec = [matplotlib.dates.date2num(x) for x in currentData.time]
for tm in range(nrTimes-1):
for rg in range(nrGates-1):
if daylight[tm,rg]: continue
term_scan.append(1)
x1,y1 = xvec[tm+0],rnge[rg+0]
x2,y2 = xvec[tm+1],rnge[rg+0]
x3,y3 = xvec[tm+1],rnge[rg+1]
x4,y4 = xvec[tm+0],rnge[rg+1]
term_verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
term_pcoll = PolyCollection(np.array(term_verts),facecolors='0.45',linewidth=0,zorder=99,alpha=0.25)
axis.add_collection(term_pcoll,autolim=False)
################################################################################
if axvlines is not None:
for line in axvlines:
axis.axvline(line,color=axvline_color,ls='--')
if xlim is None:
xlim = (np.min(time),np.max(time))
axis.set_xlim(xlim)
axis.xaxis.set_major_formatter(md.DateFormatter('%H:%M'))
axis.set_xlabel('Time [UT]')
if ylim is None:
ylim = (np.min(rnge),np.max(rnge))
axis.set_ylim(ylim)
if yticks is not None:
axis.set_yticks(yticks)
# Y-axis labeling ##############################################################
if coords == 'gate':
if secondary_coords:
if secondary_coords == 'range':
if metadata['model'] == 'IS':
axis.set_ylabel('Range Gate\nSlant Range [km]',labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('Range Gate\nGS Mapped Range [km]',labelpad=y_labelpad)
else:
geo_mag = 'Geographic' if currentData.fov.coords == 'geo' else 'Magnetic'
if metadata['model'] == 'IS':
axis.set_ylabel('Range Gate\n%s Latitude' % geo_mag,labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('Range Gate\nGS Mapped %s Latitude' % geo_mag,labelpad=y_labelpad)
yticks = axis.get_yticks()
ytick_str = []
for tck in yticks:
txt = []
txt.append('%d' % tck)
rg_inx = np.where(tck == currentData.fov.gates)[0]
if np.size(rg_inx) != 0:
if secondary_coords == 'range':
rang = currentData.fov.slantRCenter[beamInx,rg_inx]
if np.isfinite(rang):
txt.append('%d' % rang)
else:
txt.append('')
else:
lat = currentData.fov.latCenter[beamInx,rg_inx]
if np.isfinite(lat):
txt.append((u'%'+ytick_lat_format+'$^o$') % lat)
else:
txt.append('')
txt = '\n'.join(txt)
ytick_str.append(txt)
axis.set_yticklabels(ytick_str,rotation=90,ma='center')
else:
axis.set_ylabel('Range Gate',labelpad=y_labelpad)
elif coords == 'range':
if secondary_coords == 'lat':
# Use linear interpolation to get the latitude associated with a particular range.
# Make sure we only include finite values in the interpolation function.
finite_inx = np.where(np.isfinite(currentData.fov.latCenter[beam,:]))[0]
tmp_ranges = currentData.fov.slantRCenter[beam,:][finite_inx]
tmp_lats = currentData.fov.latCenter[beam,:][finite_inx]
tmp_fn = sp.interpolate.interp1d(tmp_ranges,tmp_lats)
yticks = axis.get_yticks()
ytick_str = []
for tck in yticks:
txt = []
# Append Latitude
try:
lat = tmp_fn(tck)
txt.append((u'%'+ytick_lat_format+'$^o$') % lat)
except:
txt.append('')
# Append Range
txt.append('%d' % tck)
txt = '\n'.join(txt)
ytick_str.append(txt) # Put both lat and range on same string
axis.set_yticklabels(ytick_str,rotation=90,ma='center') # Set yticklabels
# Label y-axis
geo_mag = 'Geographic' if currentData.fov.coords == 'geo' else 'Magnetic'
if metadata['model'] == 'IS':
axis.set_ylabel('%s Latitude\nSlant Range [km]' % geo_mag,labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('GS Mapped %s Latitude\nGS Mapped Range [km]' % geo_mag,labelpad=y_labelpad)
else:
if metadata['model'] == 'IS':
axis.set_ylabel('Slant Range [km]',labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('GS Mapped Range [km]',labelpad=y_labelpad)
axis.set_ylim(ylim)
# Shade xBoundary Limits
if xBoundaryLimits is None:
if currentData.metadata.has_key('timeLimits'):
xBoundaryLimits = currentData.metadata['timeLimits']
if xBoundaryLimits is not None:
gray = '0.75'
# axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray,zorder=150,alpha=0.5)
# axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray,zorder=150,alpha=0.5)
axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray,zorder=1)
axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray,zorder=1)
axis.axvline(x=xBoundaryLimits[0],color='g',ls='--',lw=2,zorder=150)
axis.axvline(x=xBoundaryLimits[1],color='g',ls='--',lw=2,zorder=150)
# Shade yBoundary Limits
if yBoundaryLimits is None:
if currentData.metadata.has_key('gateLimits') and coords == 'gate':
yBoundaryLimits = currentData.metadata['gateLimits']
if currentData.metadata.has_key('rangeLimits') and coords == 'range':
yBoundaryLimits = currentData.metadata['rangeLimits']
if yBoundaryLimits is not None:
gray = '0.75'
# axis.axhspan(ylim[0],yBoundaryLimits[0],color=gray,zorder=150,alpha=0.5)
# axis.axhspan(yBoundaryLimits[1],ylim[1],color=gray,zorder=150,alpha=0.5)
axis.axhspan(ylim[0],yBoundaryLimits[0],color=gray,zorder=1)
axis.axhspan(yBoundaryLimits[1],ylim[1],color=gray,zorder=1)
axis.axhline(y=yBoundaryLimits[0],color='g',ls='--',lw=2,zorder=150)
axis.axhline(y=yBoundaryLimits[1],color='g',ls='--',lw=2,zorder=150)
for bnd_item in yBoundaryLimits:
if coords == 'gate':
txt = []
txt.append('%d' % bnd_item)
rg_inx = np.where(bnd_item == currentData.fov.gates)[0]
if np.size(rg_inx) != 0:
lat = currentData.fov.latCenter[beamInx,rg_inx]
if np.isfinite(lat):
txt.append(u'%.1f$^o$' % lat)
else:
txt.append('')
txt = '\n'.join(txt)
else:
txt = '%.1f' % bnd_item
if plot_range_limits_label:
axis.annotate(txt, (1.01, bnd_item) ,xycoords=('axes fraction','data'),rotation=90,ma='center')
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction)
cbar.set_label(cbarLabel)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
txt = 'Model: ' + metadata['model']
axis.text(1.01, 0, txt,
horizontalalignment='left',
verticalalignment='bottom',
rotation='vertical',
size=model_text_size,
transform=axis.transAxes)
# Get axis position information.
pos = list(axis.get_position().bounds)
# Plot frequency and noise information. ########################################
if hasattr(dataObject,'prm') and plot_info:
# Adjust current plot position to fit in the freq and noise plots.
super_plot_hgt = 0.06
pos[3] = pos[3] - (2*super_plot_hgt)
axis.set_position(pos)
# Get current colorbar position and adjust it.
cbar_pos = list(cbar.ax.get_position().bounds)
cbar_pos[1] = pos[1]
cbar_pos[3] = pos[3]
cbar.ax.set_position(cbar_pos)
curr_xlim = axis.get_xlim()
curr_xticks = axis.get_xticks()
pos[1] = pos[1] + pos[3]
pos[3] = super_plot_hgt
freq_pos = pos[:]
pos[1] = pos[1] + super_plot_hgt
noise_pos = pos[:]
skynoise_ax = fig.add_axes(noise_pos, label='sky')
searchnoise_ax = fig.add_axes(noise_pos, label='search', frameon=False)
freq_ax = fig.add_axes(freq_pos, label='freq')
nave_ax = fig.add_axes(freq_pos, label='nave', frameon=False)
# cpid_ax = fig.add_axes(cpid_pos)
plot_freq(freq_ax,dataObject.prm.time,dataObject.prm.tfreq,xlim=curr_xlim,xticks=curr_xticks)
plot_nave(nave_ax,dataObject.prm.time,dataObject.prm.nave,xlim=curr_xlim,xticks=curr_xticks)
plot_skynoise(skynoise_ax,dataObject.prm.time,dataObject.prm.noisesky,xlim=curr_xlim,xticks=curr_xticks)
plot_searchnoise(searchnoise_ax,dataObject.prm.time,dataObject.prm.noisesearch,xlim=curr_xlim,xticks=curr_xticks)
# Put a title on the RTI Plot. #################################################
if plot_title:
title_y = (pos[1] + pos[3]) + 0.015
xmin = pos[0]
xmax = pos[0] + pos[2]
txt = metadata['name']+' ('+metadata['fType']+')'
fig.text(xmin,title_y,txt,ha='left',weight=550)
txt = []
txt.append(xlim[0].strftime('%Y %b %d %H%M UT - ')+xlim[1].strftime('%Y %b %d %H%M UT'))
txt.append(currentData.history[max(currentData.history.keys())]) # Label the plot with the current level of data processing.
txt = '\n'.join(txt)
fig.text((xmin+xmax)/2.,title_y,txt,weight=550,size='large',ha='center')
txt = 'Beam '+str(beam)
fig.text(xmax,title_y,txt,weight=550,ha='right')
cbar_info = {}
cbar_info['cmap'] = cmap
cbar_info['bounds'] = bounds
cbar_info['norm'] = norm
cbar_info['label'] = cbarLabel
cbar_info['ticks'] = cbar_ticks
cbar_info['mappable'] = pcoll
self.cbar_info = cbar_info
def plotRelativeRanges(dataObj,dataSet='active',time=None,fig=None):
"""Plots the N-S and E-W distance from the center cell of a field-of-view in a
pydarn.proc.music.musicArray object. Also plots one scan of the chosen
dataSet, with the center cell marked in black.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
time : Optional[None or datetime.datetime]
Time of the scan to plot.  If None, the first time in dataSet will be used.
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
Returns
-------
fig : None or matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import matplotlib
# Get center of FOV.
ctrBeamInx = currentData.fov.relative_centerInx[0]
ctrGateInx = currentData.fov.relative_centerInx[1]
ctrBeam = currentData.fov.beams[ctrBeamInx]
ctrGate = currentData.fov.gates[ctrGateInx]
ctrLat = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
ctrLon = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
gs = matplotlib.gridspec.GridSpec(3, 2,hspace=None)
axis = fig.add_subplot(gs[0:2, 1])
musicFan(dataObj,time=time,plotZeros=True,dataSet=dataSet,axis=axis,markCell=(ctrBeam,ctrGate))
# Determine the color scale for plotting.
def myround(x, base=50):
return int(base * round(float(x)/base))
absnanmax = np.nanmax(np.abs([currentData.fov.relative_x,currentData.fov.relative_y]))
rnd = myround(absnanmax)
scale = (-rnd, rnd)
# Determine the data ranges, ignoring NaNs.
xRange = np.nanmax(currentData.fov.relative_x) - np.nanmin(currentData.fov.relative_x)
yRange = np.nanmax(currentData.fov.relative_y) - np.nanmin(currentData.fov.relative_y)
latRange = np.nanmax(currentData.fov.latCenter) - np.nanmin(currentData.fov.latCenter)
lonRange = np.nanmax(currentData.fov.lonCenter) - np.nanmin(currentData.fov.lonCenter)
axis = fig.add_subplot(gs[0:2, 0])
axis.set_axis_off()
text = []
text.append('X-Range [km]: %i' % xRange)
text.append('Y-Range [km]: %i' % yRange)
text.append('Lat Range [deg]: %.1f' % latRange)
text.append('Lon Range [deg]: %.1f' % lonRange)
text.append('Center Lat [deg]: %.1f' % ctrLat)
text.append('Center Lon [deg]: %.1f' % ctrLon)
text = '\n'.join(text)
axis.text(0,0.75,text)
xlabel = 'Beam'
ylabel = 'Gate'
cbarLabel = 'Distance from Center [km]'
axis = fig.add_subplot(gs[2,0])
data = currentData.fov.relative_y
title = 'N-S Distance from Center'
title = '\n'.join([title,'(Beam: %i, Gate: %i)' % (ctrBeam, ctrGate)])
rangeBeamPlot(currentData,data,axis,title=title,xlabel=xlabel,ylabel=ylabel,scale=scale,cbarLabel=cbarLabel)
axis = fig.add_subplot(gs[2,1])
data = currentData.fov.relative_x
title = 'E-W Distance from Center'
title = '\n'.join([title,'(Beam: %i, Gate: %i)' % (ctrBeam, ctrGate)])
rangeBeamPlot(currentData,data,axis,title=title,xlabel=xlabel,ylabel=ylabel,scale=scale,cbarLabel=cbarLabel)
return fig
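# Illustrative usage sketch for plotRelativeRanges().  The musicArray object
# `dataObj` is hypothetical and must already contain an 'active' data set; the
# output file name is arbitrary.
def _example_plot_relative_ranges(dataObj):
    fig = plotRelativeRanges(dataObj, dataSet='active')
    fig.savefig('relative_ranges.png')
    return fig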
def rangeBeamPlot(currentData,data,axis,title=None,xlabel=None,ylabel=None,param='velocity',scale=None,cbarLabel=None):
"""Plots data on a range versus beam plot with a colorbar.
Parameters
----------
currentData : pydarn.proc.music.musicDataObj
musicDataObj
data : numpy.array
nBeams x nGates Numpy array of data
axis : matplotlib.axis
matplotlib axis object on which to plot
title : Optional[None or str]
Title of plot.
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
param : Optional[None or str]
Parameter used for colorbar selection.
scale : Optional[None or 2-element iterable]
Two-element colorbar scale.
cbarLabel : Optional[str]
Colorbar label.
Written by Nathaniel A. Frissell, Fall 2013
"""
fig = axis.get_figure()
ngates = len(currentData.fov.gates)
nbeams = len(currentData.fov.beams)
verts = []
scan = []
for bmInx in range(nbeams):
for rgInx in range(ngates):
scan.append(data[bmInx,rgInx])
bm = currentData.fov.beams[bmInx]
rg = currentData.fov.gates[rgInx]
x1,y1 = bm+0, rg+0
x2,y2 = bm+1, rg+0
x3,y3 = bm+1, rg+1
x4,y4 = bm+0, rg+1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if scale is None:
scale = (np.min(scan),np.max(scan))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
axis.set_xlim(min(currentData.fov.beams), max(currentData.fov.beams)+1)
axis.set_ylim(min(currentData.fov.gates), max(currentData.fov.gates)+1)
if title is not None: axis.set_title(title)
if xlabel is not None: axis.set_xlabel(xlabel)
if ylabel is not None: axis.set_ylabel(ylabel)
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
if cbarLabel is not None: cbar.set_label(cbarLabel)
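# Illustrative sketch of calling rangeBeamPlot() directly.  `currentData` is a
# hypothetical musicDataObj and `data` a matching nBeams x nGates numpy array
# (for example currentData.fov.relative_x).
def _example_range_beam_plot(currentData, data):
    from matplotlib import pyplot as plt
    fig = plt.figure()
    axis = fig.add_subplot(111)
    rangeBeamPlot(currentData, data, axis, title='Example', xlabel='Beam',
                  ylabel='Gate', cbarLabel='Distance from Center [km]')
    return fig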
def timeSeriesMultiPlot(dataObj,dataSet='active',dataObj2=None,dataSet2=None,plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,xBoundaryLimits=None):
"""Plots 1D line time series of selected cells in a pydarn.proc.music.musicArray object.
This defaults to 9 cells of the FOV.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
dataObj2 : Optional[pydarn.proc.music.musicArray]
A second musicArray object to be overlain on the the first dataObj plot.
dataSet2 : Optional[str]
which dataSet in the second musicArray to plot
plotBeam : Optional[list of int]
list of beams to plot from
plotGate : Optional[list of int]
list of range gates to plot from
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xBoundaryLimits : Optional[None or 2-element iterable]
Two-element sequence used to shade out portions of the data.  Data outside of this range will be shaded gray;
data inside of the range will have a white background.  If set to None, this will automatically be set to the timeLimits
stored in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
xData1 = currentData.time
yData1 = currentData.data
beams = currentData.fov.beams
gates = currentData.fov.gates
if dataObj2 is not None and dataSet2 is None: dataSet2 = 'active'
if dataSet2 is not None:
if dataObj2 is not None:
currentData2 = getDataSet(dataObj2,dataSet2)
else:
currentData2 = getDataSet(dataObj,dataSet2)
xData2 = currentData2.time
yData2 = currentData2.data
yData2_title = currentData2.history[max(currentData2.history.keys())]
else:
xData2 = None
yData2 = None
yData2_title = None
# Define x-axis range
if xlim is None:
tmpLim = []
tmpLim.append(min(xData1))
tmpLim.append(max(xData1))
if xData2 is not None:
tmpLim.append(min(xData2))
tmpLim.append(max(xData2))
xlim = (min(tmpLim),max(tmpLim))
# Set x boundary limits using timeLimits, if they exist. Account for both dataSet1 and dataSet2, and write it so timeLimits can be any type of sequence.
if xBoundaryLimits is None:
tmpLim = []
if currentData.metadata.has_key('timeLimits'):
tmpLim.append(currentData.metadata['timeLimits'][0])
tmpLim.append(currentData.metadata['timeLimits'][1])
if dataSet2 is not None:
if currentData2.metadata.has_key('timeLimits'):
tmpLim.append(currentData2.metadata['timeLimits'][0])
tmpLim.append(currentData2.metadata['timeLimits'][1])
if tmpLim != []:
xBoundaryLimits = (min(tmpLim), max(tmpLim))
# Get X-Axis title.
if xlabel is None:
xlabel = 'UT'
# Get Y-Axis title.
paramDict = getParamDict(currentData.metadata['param'])
if ylabel is None and paramDict.has_key('label'):
ylabel = paramDict['label']
yData1_title = currentData.history[max(currentData.history.keys())] # Label the plot with the current level of data processing
if title is None:
title = []
title.append('Selected Cells: '+yData1_title)
title.append(currentData.metadata['code'][0].upper() + ': ' +
xlim[0].strftime('%Y %b %d %H:%M - ') + xlim[1].strftime('%Y %b %d %H:%M'))
title = '\n'.join(title)
return multiPlot(xData1,yData1,beams,gates,yData1_title=yData1_title,plotBeam=plotBeam,plotGate=plotGate,fig=fig,xlim=xlim,ylim=ylim,xlabel=xlabel,ylabel=ylabel,title=title,
xData2=xData2,yData2=yData2,yData2_title=yData2_title,xBoundaryLimits=xBoundaryLimits)
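# Illustrative sketch for timeSeriesMultiPlot().  The beam and gate numbers are
# hypothetical and must exist in the field of view of `dataObj`.
def _example_time_series(dataObj):
    return timeSeriesMultiPlot(dataObj, dataSet='active',
                               plotBeam=[4, 7, 10], plotGate=[10, 20, 30])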
def spectrumMultiPlot(dataObj,dataSet='active',plotType='real_imag',plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,xBoundaryLimits=None):
"""Plots 1D line spectral plots of selected cells in a pydarn.proc.music.musicArray object.
This defaults to 9 cells of the FOV.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
plotType : Optional[str]
{'real_imag'|'magnitude'|'phase'}
plotBeam : Optional[list of int]
list of beams to plot from
plotGate : Optional[list of int]
list of range gates to plot from
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xBoundaryLimits : Optional[None or 2-element iterable]
Two-element sequence used to shade out portions of the data.  Data outside of this range will be shaded gray;
data inside of the range will have a white background.  If set to None, this will automatically be set to the timeLimits
stored in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getattr(dataObj,dataSet)
if plotType == 'magnitude':
xData1 = currentData.freqVec
yData1 = np.abs(currentData.spectrum)
yData1_title = 'Magnitude'
ylabel = 'Magnitude'
xData2 = None
yData2 = None
yData2_title = None
if xlim is None:
xlim = (0,np.max(xData1))
if ylim is None:
ylim = (0,np.max(yData1))
elif plotType == 'phase':
xData1 = currentData.freqVec
yData1 = np.angle(currentData.spectrum)
yData1_title = 'Phase'
ylabel = 'Phase [rad]'
xData2 = None
yData2 = None
yData2_title = None
if xlim is None:
xlim = (0,np.max(xData1))
else:
xData1 = currentData.freqVec
yData1 = np.real(currentData.spectrum)
yData1_title = 'Real Part'
ylabel = 'Amplitude'
xData2 = currentData.freqVec
yData2 = np.imag(currentData.spectrum)
yData2_title = 'Imaginary Part'
if xlim is None:
xlim = (np.min(xData1),np.max(xData1))
beams = currentData.fov.beams
gates = currentData.fov.gates
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
# Get X-Axis title.
if xlabel is None:
xlabel = 'Frequency [Hz]'
if title is None:
title = []
title.append('Selected Cells: '+currentData.history[max(currentData.history.keys())]) # Label the plot with the current level of data processing.
title.append(currentData.metadata['code'][0].upper() + ': ' +
timeLim[0].strftime('%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M'))
title = '\n'.join(title)
fig = multiPlot(xData1,yData1,beams,gates,yData1_title=yData1_title,plotBeam=plotBeam,plotGate=plotGate,fig=fig,xlim=xlim,ylim=ylim,xlabel=xlabel,ylabel=ylabel,title=title,
xData2=xData2,yData2=yData2,yData2_title=yData2_title,xBoundaryLimits=xBoundaryLimits)
return fig
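# Illustrative sketch comparing the magnitude and phase views of the spectrum for
# a hypothetical musicArray object.  Assumes calculateFFT() has already been run
# on the chosen data set.
def _example_spectrum_views(dataObj):
    fig_mag = spectrumMultiPlot(dataObj, dataSet='active', plotType='magnitude')
    fig_phase = spectrumMultiPlot(dataObj, dataSet='active', plotType='phase')
    return fig_mag, fig_phase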
def multiPlot(xData1,yData1,beams,gates,yData1_title=None,plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,
xData2=None,yData2=None,yData2_title=None,xBoundaryLimits=None):
"""Plots 1D time series or line spectral plots of selected cells in a 3d-array. Two data sets can be plotted simultaneously for comparison.
This defaults to 9 cells of the 3d-array.
Parameters
----------
xData1 : 1d list or numpy.array
x-axis values
yData1 : 3d numpy.array
Data to plot. First axis should correspond to xData1.
beams : Optional[list]
list identifying the beams present in the second axis of yData1.
gates : Optional[list]
list identifying the gates present in the third axis of yData1.
yData1_title : Optional[str]
Name of yData1 data.
plotBeam : Optional[list of int]
list of beams to plot from (corresponds to yData1 second axis)
plotGate : Optional[list of int]
list of range gates to plot from (corresponds to yData1 third axis)
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xData2 : Optional[1d list or numpy.array]
x-axis values of second data set
yData2 : Optional[3d numpy.array]
Second data set to plot.  First axis should correspond to xData2.
yData2_title : Optional[str]
Name of yData2 data.
xBoundaryLimits : Optional[None or 2-element iterable]
Two-element sequence used to shade out portions of the data.  Data outside of this range will be shaded gray;
data inside of the range will have a white background.  If set to None, this will automatically be set to the timeLimits
stored in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
from matplotlib import dates as md
# Calculate three default beams and gates to plot.
if plotBeam is None:
beamMin = min(beams)
beamMed = int(np.median(beams))
beamMax = max(beams)
plotBeam = np.array([beamMin,beamMed,beamMax])
if plotGate is None:
gateMin = min(gates)
gateMed = int(np.median(gates))
gateMax = max(gates)
plotGate = np.array([gateMin,gateMed,gateMax])
# Put things in the correct order. Gates need to be backwards.
plotBeam.sort()
plotGate.sort()
plotGate = plotGate[::-1] # Reverse the order.
# Determine the indices of the beams and gates.
plotBeamInx = []
for item in plotBeam:
plotBeamInx.append(int(np.where(beams == item)[0]))
plotGateInx = []
for item in plotGate:
plotGateInx.append(int(np.where(gates == item)[0]))
plotBeamInx = np.array(plotBeamInx)
plotGateInx = np.array(plotGateInx)
nCols = len(plotBeam)
nRows = len(plotGate)
# Define x-axis range
if xlim is None:
tmpLim = []
tmpLim.append(min(xData1))
tmpLim.append(max(xData1))
if xData2 is not None:
tmpLim.append(min(xData2))
tmpLim.append(max(xData2))
xlim = (min(tmpLim),max(tmpLim))
# Autorange y-axis... make all plots have the same range.
data = []
if ylim is None:
for rg,rgInx in zip(plotGate,plotGateInx):
for bm,bmInx in zip(plotBeam,plotBeamInx):
for item in yData1[:,bmInx,rgInx]:
data.append(item)
if yData2 is not None:
for item in yData2[:,bmInx,rgInx]:
data.append(item)
mx = np.nanmax(data)
mn = np.nanmin(data)
if np.logical_and(mx > 0,mn >= -0.001):
ylim = (0,mx)
elif np.logical_and(mn < 0, mx <= 0.001):
ylim = (mn,0)
elif abs(mx) >= abs(mn):
ylim = (-mx,mx)
elif abs(mn) > abs(mx):
ylim = (-abs(mn),abs(mn))
ii = 1
for rg,rgInx in zip(plotGate,plotGateInx):
for bm,bmInx in zip(plotBeam,plotBeamInx):
axis = fig.add_subplot(nRows,nCols,ii)
l1, = axis.plot(xData1,yData1[:,bmInx,rgInx],label=yData1_title)
if yData2 is not None:
l2, = axis.plot(xData2,yData2[:,bmInx,rgInx],label=yData2_title)
# Set axis limits.
axis.set_xlim(xlim)
axis.set_ylim(ylim)
# Special handling for time axes.
if xlabel == 'UT':
axis.xaxis.set_major_formatter(md.DateFormatter('%H:%M'))
labels = axis.get_xticklabels()
for label in labels:
label.set_rotation(30)
# Gray out area outside of the boundary.
if xBoundaryLimits is not None:
gray = '0.75'
axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray)
axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray)
axis.axvline(x=xBoundaryLimits[0],color='g',ls='--',lw=2)
axis.axvline(x=xBoundaryLimits[1],color='g',ls='--',lw=2)
text = 'Beam: %i, Gate: %i' % (bm, rg)
axis.text(0.02,0.92,text,transform=axis.transAxes)
# Only the first column gets labels.
if ii % nCols == 1:
axis.set_ylabel(ylabel)
# Only have the last row have time ticks
if ii <= (nRows-1)*nCols:
axis.xaxis.set_visible(False)
else:
axis.set_xlabel(xlabel)
ii = ii+1
if yData1_title is not None and yData2_title is not None:
fig.legend((l1,l2),(yData1_title,yData2_title),loc=(0.55,0.92))
if title is not None:
fig.text(0.12,0.92,title,size=24)
return fig
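# Illustrative sketch of multiPlot() on synthetic data, independent of any radar
# object: a (nTimes, nBeams, nGates) array is plotted for its default selection
# of cells.  All values below are made up for demonstration purposes.
def _example_multiplot_synthetic():
    import datetime
    times = [datetime.datetime(2013, 1, 1) + datetime.timedelta(minutes=2 * tt)
             for tt in range(50)]
    beams = np.arange(16)
    gates = np.arange(40)
    yData = np.random.randn(50, 16, 40)
    return multiPlot(times, yData, beams, gates, yData1_title='Synthetic data',
                     xlabel='UT', ylabel='Amplitude')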
def plotFullSpectrum(dataObj,dataSet='active',
fig = None,
axis = None,
xlim = None,
normalize = False,
scale = None,
plot_title = True,
maxXTicks = 10.,
plot_cbar = True,
cbar_label = 'ABS(Spectral Density)',
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_pad = 0.05,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
cbar_gstext_enable = True,
**kwArgs):
"""Plot full spectrum of a pydarn.proc.music.musicArray object. The spectrum must have already been calculated with
pydarn.proc.music.calculateFFT().
In this plot, major divisions on the x-axis are FFT bins. Every bin contains one slice representing each beam of the given radar
data, from left to right. The y-axis shows the range gates of the data object. The color bar at the top of the plot shows which
FFT bin contains the most power when integrating over the entire bin.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
axis : Optional[matplotlib.axis]
Matplotlib axis object to plot on.
xlim : Optional[None or 2-element iterable]
X-axis limits in Hz
plot_title : Optional[bool]
If True, plot the title information
maxXTicks : Optional[int]
Maximum number of xtick labels.
cbar_label : Optional[str]
Text for color bar label
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
cbar_gstext_enable : Optional[bool]
Enable "Ground Scatter Only" text
**kwArgs :
Keyword Arguments
Returns
-------
return_dict
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy import stats
return_dict = {}
currentData = getDataSet(dataObj,dataSet)
nrFreqs,nrBeams,nrGates = np.shape(currentData.spectrum)
if xlim is None:
posFreqInx = np.where(currentData.freqVec >= 0)[0]
else:
posFreqInx = np.where(np.logical_and(currentData.freqVec >= xlim[0],currentData.freqVec <= xlim[1]))[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) # Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) # Use the magnitude of the positive frequency data.
if normalize:
data = data / data.max()
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 2.*sd
if scale is None:
scale = scMax*np.array([0,1.])
nXBins = nrBeams * npf # number of bins we are going to plot
# Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
# Do plotting here!
if fig is None and axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
elif axis is not None:
fig = axis.get_figure()
if axis is None:
axis = fig.add_subplot(111)
verts = []
scan = []
# Plot Spectrum
sep = 0.1
for ff in range(npf):
for bb in range(nrBeams):
xx0 = nrBeams*(ff + 0.5*sep) + bb*(1-sep)
xx1 = xx0 + (1-sep)
for gg in range(nrGates):
scan.append(data[ff,bb,gg])
yy0 = gg
yy1 = gg + 1
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
param = 'power'
cmap = matplotlib.cm.Blues_r
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
spect_pcoll = pcoll
# Colorbar
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction,pad=cbar_pad)
cbar.set_label(cbar_label)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat') and cbar_gstext_enable:
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
# Plot average values.
verts = []
scan = []
yy0 = nrGates
yy1 = nrGates + 1
for ff in range(npf):
scan.append(avg_psd[ff])
xx0 = nrBeams*(ff + 0.5*sep)
xx1 = xx0 + nrBeams*(1-sep)
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
param = 'power'
cmap = matplotlib.cm.winter
norm = matplotlib.colors.Normalize(vmin = 0, vmax = np.max(avg_psd))
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Mark maximum PSD column.
maxInx = np.argmax(avg_psd)
xx0 = nrBeams*(maxInx + 0.5*sep)
xx1 = xx0 + nrBeams*(1-sep)
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
mkv = np.array([[x1,y1],[x2,y2],[x3,y3],[x4,y4],[x1,y1]])
poly = Polygon(mkv,facecolor='Red',edgecolor='none',zorder=100)
axis.add_patch(poly)
# X-Labels
modX = np.ceil(npf / np.float(maxXTicks))
xlabels = []
xpos = []
for ff in range(npf-1):
if (ff % modX) != 0: continue
freqLabel = '%.2f' % (posFreqVec[ff]*1000.)
if posFreqVec[ff] == 0:
periodLabel = 'Inf'
else:
periodLabel = '%.0f' % (1./posFreqVec[ff] / 60.)
xlabels.append(freqLabel+'\n'+periodLabel)
xpos.append(nrBeams* (ff + 0.1))
xlabels.append('freq [mHz]\nPer. [min]')
xpos.append(nrBeams* (npf-1 + 0.1))
axis.set_xticks(xpos)
axis.set_xticklabels(xlabels,ha='left')
# Y-Labels
maxYTicks = 10.
modY = np.ceil(nrGates/maxYTicks)
ylabels = []
ypos = []
for gg in range(nrGates):
if (gg % modY) != 0: continue
ylabels.append('%i' % currentData.fov.gates[gg])
ypos.append(gg+0.5)
ylabels.append('$\Sigma$PSD')
ypos.append(nrGates+0.5)
axis.set_yticks(ypos)
axis.set_yticklabels(ylabels)
axis.set_ylabel('Range Gate')
for ff in range(npf):
axis.axvline(x=ff*nrBeams,color='k',lw=2)
# axis.set_xlim([0,nXBins])
axis.set_ylim([0,nrGates+1])
if plot_title:
xpos = 0.130
fig.text(xpos,0.99,'Full Spectrum View',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
# cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
return_dict['cbar_pcoll'] = spect_pcoll
return_dict['cbar_label'] = cbar_label
return return_dict
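# Illustrative sketch for plotFullSpectrum().  Assumes a hypothetical musicArray
# object on which calculateFFT() has already been run; the 0-3 mHz x-limit is an
# arbitrary choice.
def _example_full_spectrum(dataObj):
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize=(20, 10))
    info = plotFullSpectrum(dataObj, dataSet='active', fig=fig, xlim=(0., 3.0e-3))
    return fig, info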
def plotDlm(dataObj,dataSet='active',fig=None):
"""Plot the cross spectral matrix of a pydarn.proc.music.musicArray object. The cross-spectral matrix must have already
been calculated for the chosen data set using pydarn.proc.music.calculateDlm().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
import copy
from scipy import stats
currentData = getDataSet(dataObj,dataSet)
data = np.abs(currentData.Dlm)
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 4.*sd
scale = scMax*np.array([0,1.])
# Do plotting here!
axis = fig.add_subplot(111)
nrL, nrM = np.shape(data)
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL):
xx0 = ll
xx1 = ll+1
for mm in range(nrM):
scan.append(data[ll,mm])
yy0 = mm
yy1 = mm + 1
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
colors = 'lasse'
if scale is None:
scale = (np.min(scan),np.max(scan))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Colorbar
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
cbar.set_label('ABS(Spectral Density)')
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,-0.075,'Ground\nscat\nonly',ha='center')
# labels[-1].set_visible(False)
axis.set_xlim([0,nrL])
axis.set_ylim([0,nrM])
axis.set_xlabel('l')
axis.set_ylabel('m')
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
ticks = []
labels = []
mod = int(np.floor(nrGates / 10))
for x in xrange(nrGates):
if x % mod != 0: continue
ll = nrBeams*x
ticks.append(ll)
txt = '%i\n%i' % (ll, currentData.fov.gates[x])
labels.append(txt)
ticks.append(nrL)
xlabels = copy.copy(labels)
xlabels.append('l\ngate')
axis.set_xticks(ticks)
axis.set_xticklabels(xlabels,ha='left')
ylabels = copy.copy(labels)
ylabels.append('m\ngate')
axis.set_yticks(ticks)
axis.set_yticklabels(ylabels)
xpos = 0.130
fig.text(xpos,0.99,'ABS(Cross Spectral Density Matrix Dlm)',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
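# Illustrative sketch for plotDlm().  Assumes calculateDlm() has already populated
# the cross-spectral matrix of the hypothetical 'active' data set.
def _example_plot_dlm(dataObj):
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize=(20, 10))
    plotDlm(dataObj, dataSet='active', fig=fig)
    return fig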
def plotKarr(dataObj,dataSet='active',fig=None,axis=None,maxSignals=None, sig_fontsize=24,
plot_title=True, cbar_ticks=None, cbar_shrink=1.0, cbar_fraction=0.15,
cbar_gstext_offset=-0.075, cbar_gstext_fontsize=None, **kwArgs):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
axis : Optional[matplotlib.axis]
Matplotlib axis object to plot on.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
sig_fontsize : Optional[float]
fontsize of signal markers
plot_title : Optional[bool]
If True, plot the title information
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
**kwArgs
Keywords arguments
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None and axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
# Do plotting here!
if axis is None:
axis = fig.add_subplot(111,aspect='equal')
else:
fig = axis.get_figure()
plotKarrAxis(dataObj,dataSet=dataSet,axis=axis,maxSignals=maxSignals,
cbar_ticks=cbar_ticks, cbar_shrink=cbar_shrink, cbar_fraction=cbar_fraction,sig_fontsize=sig_fontsize,
cbar_gstext_offset=cbar_gstext_offset, cbar_gstext_fontsize=cbar_gstext_fontsize,**kwArgs)
if plot_title:
xpos = 0.130
fig.text(xpos,0.99,'Horizontal Wave Number',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
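# Illustrative sketch for plotKarr().  Assumes calculateKarr() has already been
# run; at most the three strongest detected signals are annotated.
def _example_plot_karr(dataObj):
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize=(10, 10))
    plotKarr(dataObj, dataSet='active', fig=fig, maxSignals=3)
    return fig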
def plotKarrDetected(dataObj,dataSet='active',fig=None,maxSignals=None,roiPlot=True):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
Unlike plotKarr, this routine can plot a region-of-interest map showing features detected by pydarn.proc.music.detectSignals().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
roiPlot : Optional[bool]
If true, a region of interest plot showing the features detected using pydarn.proc.music.detectSignals()
will be displayed alongside the kArr plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
from scipy import stats
import matplotlib.patheffects as PathEffects
# Do plotting here!
if roiPlot:
axis = fig.add_subplot(121,aspect='equal')
else:
axis = fig.add_subplot(111,aspect='equal')
# Page-wide header #############################################################
xpos = 0.130
fig.text(xpos,0.99,'Horizontal Wave Number',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
# End Page-wide header #########################################################
plotKarrAxis(dataObj,dataSet=dataSet,axis=axis,maxSignals=maxSignals)
if roiPlot:
################################################################################
# Feature detection...
data2 = currentData.sigDetect.labels
nrL, nrM = np.shape(data2)
scale = [0,data2.max()]
# Do plotting here!
axis = fig.add_subplot(122,aspect='equal')
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL-1):
xx0 = currentData.kxVec[ll]
xx1 = currentData.kxVec[ll+1]
for mm in range(nrM-1):
scan.append(data2[ll,mm])
yy0 = currentData.kyVec[mm]
yy1 = currentData.kyVec[mm + 1]
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
axis.axvline(color='0.82',lw=2,zorder=150)
axis.axhline(color='0.82',lw=2,zorder=150)
# Colorbar
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
cbar.set_label('Region of Interest')
cbar.set_ticks([])
axis.set_xlim([np.min(currentData.kxVec),np.max(currentData.kxVec)])
axis.set_ylim([np.min(currentData.kyVec),np.max(currentData.kyVec)])
# Add wavelength to x/y tick labels ############################################
ticks = axis.get_xticks()
newLabels = []
for x in xrange(len(ticks)):
tck = ticks[x]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_xticklabels(newLabels)
axis.set_xlabel(u'kx [rad]\n$\lambda$ [km]',ha='center')
ticks = axis.get_yticks()
newLabels = []
for y in xrange(len(ticks)):
tck = ticks[y]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_yticklabels(newLabels)
axis.set_ylabel(u'ky [rad]\n$\lambda$ [km]',va='center')
# End add wavelength to x/y tick labels ########################################
if hasattr(currentData,'sigDetect'):
pe = [PathEffects.withStroke(linewidth=3,foreground='w')]
tmpList = range(currentData.sigDetect.nrSigs)[::-1] # Force list to plot backwards so number 1 is on top!
for signal in currentData.sigDetect.info:
if maxSignals is not None:
if signal['order'] > maxSignals: continue
xpos = currentData.kxVec[signal['maxpos'][0]]
ypos = currentData.kyVec[signal['maxpos'][1]]
txt = '%i' % signal['order']
axis.text(xpos,ypos,txt,color='k',zorder=200-signal['order'],size=24,path_effects=pe)
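# Illustrative sketch for plotKarrDetected().  Assumes detectSignals() has been
# run on the hypothetical data set so that the region-of-interest panel can be
# drawn next to the kArr panel.
def _example_plot_karr_detected(dataObj):
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize=(20, 10))
    plotKarrDetected(dataObj, dataSet='active', fig=fig, maxSignals=5, roiPlot=True)
    return fig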
def plotKarrAxis(dataObj,dataSet='active',axis=None,maxSignals=None, sig_fontsize=24,x_labelpad=None,y_labelpad=None,
cbar_ticks=None, cbar_shrink=1.0, cbar_fraction=0.15,
cbar_gstext_offset=-0.075, cbar_gstext_fontsize=None,cbar_pad=0.05,cmap=None,plot_colorbar=True):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
This routine will make the plot without titles, etc. It is used as the foundation for plotKarr() and plotKarrDetected().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
axis : Optional[matplotlib.figure.axis]
matplotlib axis object that will be plotted to. If not provided, this function will return.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
sig_fontsize : Optional[float]
fontsize of signal markers
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
cmap : Optional[None or matplotlib colormap object]
If None and cmap_handling=='matplotlib', use jet.
plot_colorbar : Optional[bool]
Enable or disable colorbar plotting.
Returns
-------
return_dict
Written by Nathaniel A. Frissell, Fall 2013
"""
if axis is None: return
return_dict = {}
fig = axis.get_figure()
from scipy import stats
import matplotlib.patheffects as PathEffects
currentData = getDataSet(dataObj,dataSet)
data = np.abs(currentData.karr) - np.min(np.abs(currentData.karr))
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
scale = [0.,1.]
nrL, nrM = np.shape(data)
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL-1):
xx0 = currentData.kxVec[ll]
xx1 = currentData.kxVec[ll+1]
for mm in range(nrM-1):
scan.append(data[ll,mm])
yy0 = currentData.kyVec[mm]
yy1 = currentData.kyVec[mm + 1]
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if cmap is None:
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
################################################################################
# Annotations
axis.axvline(color='0.82',lw=2,zorder=150)
axis.axhline(color='0.82',lw=2,zorder=150)
# Colorbar
cbar_label = 'Normalized Wavenumber Power'
if plot_colorbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction,pad=cbar_pad)
cbar.set_label(cbar_label)
if not cbar_ticks:
cbar_ticks = np.arange(10)/10.
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
# cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
# cbar.set_label('ABS(Spectral Density)')
# cbar.set_ticks(np.arange(10)/10.)
# if currentData.metadata.has_key('gscat'):
# if currentData.metadata['gscat'] == 1:
# cbar.ax.text(0.5,-0.075,'Ground\nscat\nonly',ha='center')
axis.set_xlim([np.min(currentData.kxVec),np.max(currentData.kxVec)])
axis.set_ylim([np.min(currentData.kyVec),np.max(currentData.kyVec)])
# Add wavelength to x/y tick labels ############################################
ticks = axis.get_xticks()
newLabels = []
for x in xrange(len(ticks)):
tck = ticks[x]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_xticklabels(newLabels)
axis.set_xlabel(u'kx [rad]\n$\lambda$ [km]',ha='center',labelpad=x_labelpad)
# axis.set_xlabel('%f' % x_labelpad,ha='center',labelpad=x_labelpad)
ticks = axis.get_yticks()
newLabels = []
for y in xrange(len(ticks)):
tck = ticks[y]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([km_txt,rad_txt])
newLabels.append(txt)
axis.set_yticklabels(newLabels,rotation=90.)
axis.set_ylabel(u'ky [rad]\n$\lambda$ [km]',va='center',labelpad=y_labelpad)
# End add wavelength to x/y tick labels ########################################
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
if hasattr(currentData,'sigDetect'):
pe = [PathEffects.withStroke(linewidth=3,foreground='w')]
for signal in currentData.sigDetect.info:
if maxSignals is not None:
if signal['order'] > maxSignals: continue
xpos = currentData.kxVec[signal['maxpos'][0]]
ypos = currentData.kyVec[signal['maxpos'][1]]
txt = '%i' % signal['order']
axis.text(xpos,ypos,txt,color='k',zorder=200-signal['order'],size=sig_fontsize,path_effects=pe)
return_dict['cbar_pcoll'] = pcoll
return_dict['cbar_label'] = cbar_label
return return_dict
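# Illustrative sketch of plotKarrAxis() used to build a custom two-panel figure;
# the data object and its detected signals are hypothetical.
def _example_karr_axis_panels(dataObj):
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize=(16, 8))
    ax0 = fig.add_subplot(121, aspect='equal')
    ax1 = fig.add_subplot(122, aspect='equal')
    plotKarrAxis(dataObj, dataSet='active', axis=ax0, maxSignals=5)
    plotKarrAxis(dataObj, dataSet='active', axis=ax1, maxSignals=1,
                 plot_colorbar=False)
    return fig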
| gpl-3.0 |
gfyoung/pandas | pandas/tests/tslibs/test_to_offset.py | 2 | 4769 | import re
import pytest
from pandas._libs.tslibs import Timedelta, offsets, to_offset
@pytest.mark.parametrize(
"freq_input,expected",
[
(to_offset("10us"), offsets.Micro(10)),
(offsets.Hour(), offsets.Hour()),
("2h30min", offsets.Minute(150)),
("2h 30min", offsets.Minute(150)),
("2h30min15s", offsets.Second(150 * 60 + 15)),
("2h 60min", offsets.Hour(3)),
("2h 20.5min", offsets.Second(8430)),
("1.5min", offsets.Second(90)),
("0.5S", offsets.Milli(500)),
("15l500u", offsets.Micro(15500)),
("10s75L", offsets.Milli(10075)),
("1s0.25ms", offsets.Micro(1000250)),
("1s0.25L", offsets.Micro(1000250)),
("2800N", offsets.Nano(2800)),
("2SM", offsets.SemiMonthEnd(2)),
("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
("2SMS-15", offsets.SemiMonthBegin(2)),
],
)
def test_to_offset(freq_input, expected):
result = to_offset(freq_input)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)]
)
def test_to_offset_negative(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"freqstr",
[
"2h20m",
"U1",
"-U",
"3U1",
"-2-3U",
"-2D:3H",
"1.5.0S",
"2SMS-15-15",
"2SMS-15D",
"100foo",
# Invalid leading +/- signs.
"+-1d",
"-+1h",
"+1",
"-7",
"+d",
"-m",
# Invalid shortcut anchors.
"SM-0",
"SM-28",
"SM-29",
"SM-FOO",
"BSM",
"SM--1",
"SMS-1",
"SMS-28",
"SMS-30",
"SMS-BAR",
"SMS-BYR",
"BSMS",
"SMS--2",
],
)
def test_to_offset_invalid(freqstr):
# see gh-13930
# We escape string because some of our
# inputs contain regex special characters.
msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
to_offset(freqstr)
def test_to_offset_no_evaluate():
msg = str(("", ""))
with pytest.raises(TypeError, match=msg):
to_offset(("", ""))
def test_to_offset_tuple_unsupported():
with pytest.raises(TypeError, match="pass as a string instead"):
to_offset((5, "T"))
@pytest.mark.parametrize(
"freqstr,expected",
[
("2D 3H", offsets.Hour(51)),
("2 D3 H", offsets.Hour(51)),
("2 D 3 H", offsets.Hour(51)),
(" 2 D 3 H ", offsets.Hour(51)),
(" H ", offsets.Hour()),
(" 3 H ", offsets.Hour(3)),
],
)
def test_to_offset_whitespace(freqstr, expected):
result = to_offset(freqstr)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"kwargs,expected",
[
({"days": 1, "seconds": 1}, offsets.Second(86401)),
({"days": -1, "seconds": 1}, offsets.Second(-86399)),
({"hours": 1, "minutes": 10}, offsets.Minute(70)),
({"hours": 1, "minutes": -10}, offsets.Minute(50)),
({"weeks": 1}, offsets.Day(7)),
({"hours": 1}, offsets.Hour(1)),
({"hours": 1}, to_offset("60min")),
({"microseconds": 1}, offsets.Micro(1)),
({"microseconds": 0}, offsets.Nano(0)),
],
)
def test_to_offset_pd_timedelta(kwargs, expected):
# see gh-9064
td = Timedelta(**kwargs)
result = to_offset(td)
assert result == expected
@pytest.mark.parametrize(
"shortcut,expected",
[
("W", offsets.Week(weekday=6)),
("W-SUN", offsets.Week(weekday=6)),
("Q", offsets.QuarterEnd(startingMonth=12)),
("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
("SM", offsets.SemiMonthEnd(day_of_month=15)),
("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
],
)
def test_anchored_shortcuts(shortcut, expected):
result = to_offset(shortcut)
assert result == expected
| bsd-3-clause |
hollerith/trading-with-python | lib/cboe.py | 76 | 4433 | # -*- coding: utf-8 -*-
"""
toolset working with cboe data
@author: Jev Kuznetsov
Licence: BSD
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index
from pandas.core import datetools
import numpy as np
import pandas as pd
def monthCode(month):
"""
perform month->code and back conversion
Input: either month nr (int) or month code (str)
Returns: code or month nr
"""
codes = ('F','G','H','J','K','M','N','Q','U','V','X','Z')
if isinstance(month,int):
return codes[month-1]
elif isinstance(month,str):
return codes.index(month)+1
else:
raise ValueError('Function accepts int or str')
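# Illustrative sketch of the round-trip behaviour of monthCode(); the assertions
# simply restate the mapping defined above.
def _example_month_code():
    assert monthCode(1) == 'F'
    assert monthCode('Z') == 12
    assert monthCode(monthCode(6)) == 6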
def vixExpiration(year,month):
"""
expiration date of a VX future
"""
t = datetime(year,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_exp = t_new-datetools.relativedelta(days=30)
return t_exp
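# Illustrative sketch: expiration of the November 2011 VX future, 30 days before
# the expiration Friday of the following month as computed above.
def _example_vix_expiration():
    t_exp = vixExpiration(2011, 11)
    return t_exp.strftime('%Y-%m-%d')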
def getPutCallRatio():
""" download current Put/Call ratio"""
urlStr = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv'
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
headerLine = 2
header = lines[headerLine].strip().split(',')
data = [[] for i in range(len(header))]
for line in lines[(headerLine+1):]:
fields = line.rstrip().split(',')
data[0].append(datetime.strptime(fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[1:]):
data[i+1].append(float(field))
return DataFrame(dict(zip(header[1:],data[1:])), index = Index(data[0]))
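# Illustrative sketch: download the total put/call data and plot each column.
# Requires network access to cboe.com.
def _example_put_call_ratio():
    pc = getPutCallRatio()
    pc.plot(subplots=True)
    return pc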
def getHistoricData(symbols = ['VIX','VXV','VXMT','VVIX']):
''' get historic data from CBOE
return dataframe
'''
if not isinstance(symbols,list):
symbols = [symbols]
urls = {'VIX':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vixcurrent.csv',
'VXV':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxvdailyprices.csv',
'VXMT':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vxmtdailyprices.csv',
'VVIX':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/VVIXtimeseries.csv'}
startLines = {'VIX':1,'VXV':2,'VXMT':2,'VVIX':1}
cols = {'VIX':'VIX Close','VXV':'CLOSE','VXMT':'Close','VVIX':'VVIX'}
data = {}
for symbol in symbols:
urlStr = urls[symbol]
print 'Downloading %s from %s' % (symbol,urlStr)
data[symbol] = pd.read_csv(urllib2.urlopen(urlStr), header=startLines[symbol],index_col=0,parse_dates=True)[cols[symbol]]
return pd.DataFrame(data)
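# Illustrative sketch: download VIX and VXV history and plot their ratio, a
# common term-structure gauge.  Requires network access to cboe.com.
def _example_vix_term_structure():
    df = getHistoricData(['VIX', 'VXV'])
    ratio = df['VIX'] / df['VXV']
    ratio.plot()
    return ratio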
#---------------------classes--------------------------------------------
class VixFuture(object):
"""
Class for easy handling of futures data.
"""
def __init__(self,year,month):
self.year = year
self.month = month
def expirationDate(self):
return vixExpiration(self.year,self.month)
def daysLeft(self,date):
""" business days to expiration date """
from pandas import DateRange # this will cause a problem with pandas 0.14 and higher... DateRange is deprecated and replaced by DatetimeIndex
r = DateRange(date,self.expirationDate())
return len(r)
def __repr__(self):
return 'VX future [%i-%i %s] Expires: %s' % (self.year,self.month,monthCode(self.month),
self.expirationDate())
#-------------------test functions---------------------------------------
def testDownload():
vix = getHistoricData('VIX')
vxv = getHistoricData('VXV')
vix.plot()
vxv.plot()
def testExpiration():
for month in xrange(1,13):
d = vixExpiration(2011,month)
print d.strftime("%B, %d %Y (%A)")
if __name__ == '__main__':
#testExpiration()
v = VixFuture(2011,11)
print v
print v.daysLeft(datetime(2011,11,10))
| bsd-3-clause |
LarsDu/DeepNuc | deepnuc/nucregressor.py | 2 | 14634 | import tensorflow as tf
import sklearn.metrics as metrics
import numpy as np
import nucconvmodel
import sys
import os
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from duseqlogo import LogoTools
import scipy.stats
from nucinference import NucInference
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pprint
from itertools import cycle
import nucheatmap
from collections import OrderedDict
class NucRegressor(NucInference):
use_onehot_labels = False
def __init__(self,
sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob=0.5,
beta1=0.9,
concat_revcom_input=False,
nn_method_key="inferenceA",
classification_threshold=None,
output_scale=[0,1]):
super(NucRegressor, self).__init__( sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob,
beta1,
concat_revcom_input,
nn_method_key)
self.training_mean,self.training_std = self.train_batcher.get_label_mean_std()
self.training_min,self.training_max = self.train_batcher.get_label_min_max()
'''
print "Training mean:\t",self.training_mean
print "Training standard deviation:\t",self.training_std
print "Training min:\t", self.training_min
print "Training max:\t",self.training_max
'''
#For now this can only work if nucdata and batcher are specified as having 1 class
if self.train_batcher.num_classes != 1:
print "Error, more than two classes detected in train batcher"
else:
self.num_classes = 1
self.classification_threshold = classification_threshold
#In order to plot figures in their original scale instead of [0,1] we need to
#pass the NucRegressor the original scale the data came in.
self.output_scale = output_scale
self.save_on_epoch = 50
def build_model(self):
self.dna_seq_placeholder = tf.placeholder(tf.float32,
shape=[None,self.seq_len,4],
name="dna_seq")
#Note for regression, these perhaps should not be called labels
self.labels_placeholder = tf.placeholder(tf.float32,
shape=[None, self.num_classes],
name="labels")
self.keep_prob_placeholder = tf.placeholder(tf.float32,name="keep_prob")
#Note: Since I am not using a sigmoid here, technically these are not logits
self.raw_logits, self.network = self.nn_method(self.dna_seq_placeholder,
self.keep_prob_placeholder,
self.num_classes)
self.logits = tf.nn.sigmoid(self.raw_logits)
#self.descaled_prediction = (self.logits * self.training_std)+self.training_mean
#self.descaled_prediction = self.logits*(self.training_max-self.training_min)+self.training_min
#self.standardized_labels = (self.labels_placeholder-self.training_mean)/self.training_std
#self.standardized_labels = (self.labels_placeholder-self.training_min)/(self.training_max-self.training_min)
#Regression tasks should use mean squared error
self.squared_diff = tf.squared_difference(self.logits,self.labels_placeholder)
self.loss = tf.reduce_mean(self.squared_diff)
'''Write and consolidate summaries'''
self.loss_summary = tf.summary.scalar('loss',self.loss)
self.summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph)
self.summary_op = tf.summary.merge([self.loss_summary])
#Note: Do not use tf.summary.merge_all() here. This will break encapsulation for
# cross validation and lead to crashes when training multiple models
# Add gradient ops to graph with learning rate
self.train_op = tf.train.AdamOptimizer(self.learning_rate,
beta1=self.beta1).minimize(self.loss)
self.vars = tf.trainable_variables()
self.var_names = [var.name for var in self.vars]
#print "Trainable variables:\n"
#for vname in self.var_names:
# print vname
self.saver = tf.train.Saver()
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
self.load(self.checkpoint_dir)
def eval_model_metrics(self,
batcher,
save_plots=False,
image_name='auroc_auprc.png',
eval_batch_size=50):
"""
Note: This method is intended to only be used for regression tasks
"""
all_true = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
all_preds = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
num_whole_pulls = batcher.num_records//eval_batch_size
num_single_pulls= batcher.num_records%eval_batch_size
num_steps = num_whole_pulls+num_single_pulls
for i in range(num_steps):
if i<num_whole_pulls:
batch_size=eval_batch_size
else:
batch_size=1
labels_batch, dna_seq_batch = batcher.pull_batch(batch_size)
feed_dict = {
self.dna_seq_placeholder:dna_seq_batch,
self.labels_placeholder:labels_batch,
self.keep_prob_placeholder:1.0
}
cur_preds= self.sess.run(self.logits,feed_dict=feed_dict)
#Fill labels array
if batch_size > 1:
start_ind = batch_size*i
elif batch_size == 1:
start_ind = num_whole_pulls*eval_batch_size+(i-num_whole_pulls)
else:
print "Never reach this condition"
all_true[start_ind:start_ind+batch_size] = labels_batch
all_preds[start_ind:start_ind+batch_size] = cur_preds
'''
#Test code
print "Classification threshold",self.classification_threshold
print "Checking here", np.sum(all_true>self.classification_threshold).astype(np.float32)
print "DOUBLE checking"
print all_true.shape
counter = 0
for i in range(batcher.num_records):
lab,_ = batcher.pull_batch(1)
if lab>self.classification_threshold:
counter += 1
print "Counter",counter
'''
print "True",all_true[-1:-10,0]
print "Preds",all_preds[-1:-10,0]
#Calc metrics and save results in a dict
md = self.calc_regression_metrics(all_true[:,0],all_preds[:,0])
md["epoch"]=self.epoch
md["step"]=self.step
print 'Mean Absolute Error: %0.08f Mean Squared Error: %0.08f Median Absolute Error: %0.08f R-squared score %0.08f' % (md["mean_absolute_error"],
md["mean_squared_error"],
md["median_absolute_error"],
md["r2_score"])
print 'Pearson correlation: %0.04f Pearson p-value: %0.04f \
Spearman correlation: %.04f Spearman p-value: %.04f' % \
(md['pearson_correlation'],
md['pearson_pvalue'],
md['spearman_correlation'],
md['spearman_pvalue'])+"\n"
#print "Classification threshold!",self.classification_threshold
if self.classification_threshold:
'''
If a classification threshold was specified, calc
auROC,auPRC and other classification metrics
'''
cd = self.calc_thresh_classifier_metrics(all_true,
all_preds,
self.classification_threshold)
#Add cd dict entries to md
md.update(cd)
num_correct = md["accuracy"]
print 'Num examples: %d Num correct: %d Accuracy: %0.04f' % \
(batcher.num_records, md["num_correct"], md["accuracy"])+'\n'
if save_plots and self.classification_threshold:
###Plot some metrics
plot_colors = cycle(['cyan','blue','orange','teal'])
#Generate auROC plot axes
fig1,ax1 = plt.subplots(2)
fig1.subplots_adjust(bottom=0.2)
ax1[0].plot([0,1],[0,1],color='navy',lw=2,linestyle='--')
ax1[0].set_xbound(0.0,1.0)
ax1[0].set_ybound(0.0,1.05)
ax1[0].set_xlabel('False Positive Rate')
ax1[0].set_ylabel('True Positive Rate')
ax1[0].set_title('auROC')
#plt.legend(loc='lower right')
ax1[0].plot(md["fpr"],md["tpr"],color=plot_colors.next(),
lw=2,linestyle='-',label='auROC curve (area=%0.2f)' % md["auroc"] )
#Generate auPRC plot axes
#ax1[1].plot([0,1],[1,1],color='royalblue',lw=2,linestyle='--')
ax1[1].set_xlabel('Precision')
ax1[1].set_ylabel('Recall')
ax1[1].set_title('auPRC')
ax1[1].plot(md["thresh_precision"],md["thresh_recall"],color=plot_colors.next(),
lw=2,linestyle='-',label='auPRC curve (area=%0.2f)' % md["auprc"] )
ax1[1].set_xbound(0.0,1.0)
ax1[1].set_ybound(0.0,1.05)
#Note: avg prec score is the area under the prec recall curve
#Note: Presumably class 1 (pos examples) should be the only f1 score we focus on
#print "F1 score for class",i,"is",f1_score
plt.tight_layout()
plt_fname = self.save_dir+os.sep+image_name
print "Saving auROC image to",plt_fname
fig1.savefig(plt_fname)
return md
def calc_thresh_classifier_metrics(self,all_true,all_preds,threshold):
"""
Mark all predictions exceeding the threshold as positive and all others as
negative, then compute standard classification metrics.
"""
self.pos_index=1
print "Thresh is",threshold
binary_true = (all_true>threshold).astype(np.float32)
binary_preds = (all_preds>threshold).astype(np.float32)
fpr,tpr,_ = metrics.roc_curve( binary_true,
binary_preds)
auroc = metrics.auc(fpr,tpr)
'''
print "Raw inputs(all_true)",all_true
print "Raw logits(all_probs)",all_preds
print "Threshold labels on inputs",binary_true
print "Sum of all thresholded inputs",np.sum(binary_true)
print "Thresholded labels on logits",binary_preds
print "Sum of thresholded logits",np.sum(binary_preds)
'''
thresh_precision,thresh_recall,prc_thresholds = metrics.precision_recall_curve(
binary_true,
all_preds,
pos_label=self.pos_index)
precision, recall, f1_score, support = metrics.precision_recall_fscore_support(
binary_true,
binary_preds,
pos_label=self.pos_index)
num_correct = metrics.accuracy_score(binary_true,binary_preds,normalize=False)
accuracy = num_correct/float(all_preds.shape[0])
precision = precision[self.pos_index]
recall = recall[self.pos_index]
f1_score = f1_score[self.pos_index]
support = support[self.pos_index]
auprc = metrics.average_precision_score(binary_true,all_preds)
return OrderedDict([
("num_correct",num_correct),
("accuracy",accuracy),
("auroc",auroc),
("auprc",auprc),
("fpr",fpr),
("tpr",tpr),
("precision",precision),
("recall",recall),
("f1_score",f1_score),
("support",support),
("thresh_precision",thresh_precision),
("thresh_recall",thresh_recall),
("prc_thresholds",prc_thresholds)
])
def calc_regression_metrics(self,all_true,all_preds):
mean_absolute_error = metrics.mean_absolute_error(all_true,all_preds)
mean_squared_error = metrics.mean_squared_error(all_true,all_preds)
median_absolute_error = metrics.median_absolute_error(all_true,all_preds)
r2_score = metrics.r2_score(all_true,all_preds)
pearson_corr,pearson_pval = scipy.stats.pearsonr(all_true,all_preds)
spear_corr,spear_pval = scipy.stats.spearmanr(all_true,all_preds)
return OrderedDict([
("mean_absolute_error",mean_absolute_error),
("mean_squared_error",mean_squared_error),
("median_absolute_error",median_absolute_error),
("r2_score",r2_score),
("pearson_correlation",pearson_corr),
("pearson_pvalue",pearson_pval),
("spearman_correlation",spear_corr),
("spearman_pvalue",spear_pval)
])
| gpl-3.0 |
chrinide/hep_ml | hep_ml/metrics_utils.py | 3 | 10683 | from __future__ import division, print_function, absolute_import
import numpy
from sklearn.utils.validation import column_or_1d
from .commonutils import check_sample_weight, sigmoid_function
__author__ = 'Alex Rogozhnikov'
def prepare_distribution(data, weights):
"""Prepares the distribution to be used later in KS and CvM,
merges equal data, computes (summed) weights and cumulative distribution.
All output arrays are of same length and correspond to each other.
:param data: array of shape [n_samples]
:param weights: array of shape [n_samples]
:return: tuple with (prepared_data, prepared_weights, prepared_cdf),
components are three parallel arrays of shape [n_unique_values]
"""
weights = weights / numpy.sum(weights)
prepared_data, indices = numpy.unique(data, return_inverse=True)
prepared_weights = numpy.bincount(indices, weights=weights)
prepared_cdf = compute_cdf(prepared_weights)
return prepared_data, prepared_weights, prepared_cdf
# region Helpful functions to work with bins and groups
"""
There are two basic approaches here: bins and knn.
Here they are represented as bins and (!) groups.
The difference between bins and groups: each event belongs to one and only one bin,
while in the case of groups each event may belong to several groups.
Knn is one particular case of groups, and bins can also be reduced to groups.
Bin_indices is an array, where for each event its bin index is written:
bin_indices = [0, 0, 1, 2, 2, 4]
Group_indices is list, each item is indices of events in some group
group_indices = [[0,1], [2], [3,4], [5]]
Group matrix is another way to write group_indices,
this is sparse matrix of shape [n_groups, n_samples],
group_matrix[group_id, sample_id] = 1, if event belong to cell, 0 otherwise
While bin indices are computed for all the events together, group indices
are typically computed only for events of some particular class.
"""
def compute_bin_indices(X_part, bin_limits=None, n_bins=20):
"""For arbitrary number of variables computes the indices of data,
the index of each event is a unique bin number from zero to \prod_j (len(bin_limits[j]) + 1)
:param X_part: columns along which binning is done
:param bin_limits: array of edges between bins.
If bin_limits is not provided, they are computed using data.
:type X_part: numpy.ndarray
"""
if bin_limits is None:
bin_limits = []
for variable_index in range(X_part.shape[1]):
variable_data = X_part[:, variable_index]
bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])
bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)
for axis, bin_limits_axis in enumerate(bin_limits):
bin_indices *= (len(bin_limits_axis) + 1)
bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])
return bin_indices
def bin_to_group_indices(bin_indices, mask):
""" Transforms bin_indices into group indices, skips empty bins
:type bin_indices: numpy.array, each element is the index of the bin the event belongs to, shape = [n_samples]
:type mask: numpy.array, boolean mask of indices to split into bins, shape = [n_samples]
:rtype: list(numpy.array), each element is indices of elements in some bin
"""
assert len(bin_indices) == len(mask), "Different length"
bins_id = numpy.unique(bin_indices)
result = list()
for bin_id in bins_id:
result.append(numpy.where(mask & (bin_indices == bin_id))[0])
return result
def group_indices_to_groups_matrix(group_indices, n_events):
"""
:param group_indices: array, each component corresponds to group
(element = list with indices of events belonging to group)
:return: sparse matrix of shape [n_groups, n_samples],
one if particular event belongs to particular category, 0 otherwise
"""
from scipy import sparse
groups_matrix = sparse.lil_matrix((len(group_indices), n_events))
for group_id, events_in_group in enumerate(group_indices):
groups_matrix[group_id, events_in_group] = 1
return sparse.csr_matrix(groups_matrix)
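# Illustrative sketch (assumed toy values, mirroring the example above):
#   bin_indices = numpy.array([0, 0, 1, 2, 2, 4])
#   groups = bin_to_group_indices(bin_indices, numpy.ones(6, dtype=bool))
#   # -> [array([0, 1]), array([2]), array([3, 4]), array([5])]
#   matrix = group_indices_to_groups_matrix(groups, n_events=6)
#   # -> sparse matrix of shape [4, 6] with ones marking group membership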
# endregion
# region Supplementary uniformity-related functions (to measure flatness of predictions)
def compute_cdf(ordered_weights):
"""Computes cumulative distribution function (CDF) by ordered weights,
be sure that sum(ordered_weights) == 1.
Minor difference: using symmetrized version
F(x) = 1/2 (F(x-0) + F(x+0))
"""
return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights
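# Tiny worked example (illustrative): three equal weights summing to one give
#   compute_cdf(numpy.array([1., 1., 1.]) / 3.)  ->  [1/6, 1/2, 5/6]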
def compute_bin_weights(bin_indices, sample_weight):
assert len(bin_indices) == len(sample_weight), 'Different lengths of array'
result = numpy.bincount(bin_indices, weights=sample_weight)
return result / numpy.sum(result)
def compute_divided_weight(group_matrix, sample_weight):
"""Divided weight takes into account that different events
are met different number of times """
occurences = numpy.array(group_matrix.sum(axis=0)).flatten()
return sample_weight / numpy.maximum(occurences, 1)
def compute_group_weights(group_matrix, sample_weight):
"""
Group weight = sum of divided weights of indices inside that group.
"""
divided_weight = compute_divided_weight(group_matrix=group_matrix, sample_weight=sample_weight)
result = group_matrix.dot(divided_weight)
return result / numpy.sum(result)
def compute_bin_efficiencies(y_score, bin_indices, cut, sample_weight, minlength=None):
"""Efficiency of bin = total weight of (signal) events that passed the cut
in the bin / total weight of signal events in the bin.
Empty bins get an efficiency of zero."""
y_score = column_or_1d(y_score)
assert len(y_score) == len(sample_weight) == len(bin_indices), "different size"
if minlength is None:
minlength = numpy.max(bin_indices) + 1
bin_total = numpy.bincount(bin_indices, weights=sample_weight, minlength=minlength)
passed_cut = y_score > cut
bin_passed_cut = numpy.bincount(bin_indices[passed_cut],
weights=sample_weight[passed_cut], minlength=minlength)
return bin_passed_cut / numpy.maximum(bin_total, 1)
def compute_group_efficiencies_by_indices(y_score, groups_indices, cut, divided_weight=None, smoothing=0.0):
""" Provided cut, computes efficiencies inside each bin.
:param divided_weight: weight for each event, divided by the number of its occurrences """
y_score = column_or_1d(y_score)
divided_weight = check_sample_weight(y_score, sample_weight=divided_weight)
# with smoothing=0, this is 0 or 1, latter for passed events.
passed_cut = sigmoid_function(y_score - cut, width=smoothing)
if isinstance(groups_indices, numpy.ndarray) and numpy.ndim(groups_indices) == 2:
# this speedup is specially for knn
result = numpy.average(numpy.take(passed_cut, groups_indices),
weights=numpy.take(divided_weight, groups_indices),
axis=1)
else:
result = numpy.zeros(len(groups_indices))
for i, group in enumerate(groups_indices):
result[i] = numpy.average(passed_cut[group], weights=divided_weight[group])
return result
def compute_group_efficiencies(y_score, groups_matrix, cut, divided_weight=None, smoothing=0.0):
""" Provided cut, computes efficiencies inside each bin.
:param divided_weight: weight for each event, divided by the number of its occurrences """
y_score = column_or_1d(y_score)
divided_weight = check_sample_weight(y_score, sample_weight=divided_weight)
# with smoothing=0, this is 0 or 1, latter for passed events.
passed_cut = sigmoid_function(y_score - cut, width=smoothing)
passed_weight = groups_matrix.dot(divided_weight * passed_cut)
total_weight = groups_matrix.dot(divided_weight)
return passed_weight / numpy.maximum(total_weight, 1e-10)
def weighted_deviation(a, weights, power=2.):
""" sum weight * |x - x_mean|^power, measures deviation from mean """
mean = numpy.average(a, weights=weights)
return numpy.average(numpy.abs(mean - a) ** power, weights=weights)
# endregion
# region Special methods for uniformity metrics
def theil(x, weights):
"""Theil index of array with regularization"""
assert numpy.all(x >= 0), "negative numbers can't be used in Theil"
x_mean = numpy.average(x, weights=weights)
normed = x / x_mean
# to avoid problems with log of negative number.
normed[normed < 1e-20] = 1e-20
return numpy.average(normed * numpy.log(normed), weights=weights)
def _ks_2samp_fast(prepared_data1, data2, prepared_weights1, weights2, cdf1):
"""Pay attention - prepared data should not only be sorted,
but equal items should be merged (by summing weights),
data2 should not have elements larger than max(prepared_data1) """
indices = numpy.searchsorted(prepared_data1, data2)
weights2 /= numpy.sum(weights2)
prepared_weights2 = numpy.bincount(indices, weights=weights2, minlength=len(prepared_data1))
cdf2 = compute_cdf(prepared_weights2)
return numpy.max(numpy.abs(cdf1 - cdf2))
def ks_2samp_weighted(data1, data2, weights1, weights2):
"""Kolmogorov-Smirnov distance, almost the same as ks2samp from scipy.stats, but this version supports weights.
:param data1: array-like of shape [n_samples1]
:param data2: array-like of shape [n_samples2]
:param weights1: None or array-like of shape [n_samples1]
:param weights2: None or array-like of shape [n_samples2]
:return: float, Kolmogorov-Smirnov distance.
"""
x = numpy.unique(numpy.concatenate([data1, data2]))
weights1 = weights1 / numpy.sum(weights1) * 1.
weights2 = weights2 / numpy.sum(weights2) * 1.
inds1 = numpy.searchsorted(x, data1)
inds2 = numpy.searchsorted(x, data2)
w1 = numpy.bincount(inds1, weights=weights1, minlength=len(x))
w2 = numpy.bincount(inds2, weights=weights2, minlength=len(x))
F1 = compute_cdf(w1)
F2 = compute_cdf(w2)
return numpy.max(numpy.abs(F1 - F2))
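# Minimal usage sketch (toy values; weights need not be normalised, the
# function rescales them itself):
#   w = numpy.ones(3)
#   ks_2samp_weighted([1, 2, 3], [1, 2, 3], w, w)  ->  0.0
#   ks_2samp_weighted([1, 2, 3], [7, 8, 9], w, w)  ->  5/6 (approaches 1 for
#   large disjoint samples because of the symmetrized CDF)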
def _cvm_2samp_fast(prepared_data1, data2, prepared_weights1, weights2, cdf1, power=2.):
"""Pay attention - prepared data should not only be sorted,
but equal items should be merged (by summing weights) """
indices = numpy.searchsorted(prepared_data1, data2)
weights2 /= numpy.sum(weights2)
prepared_weights2 = numpy.bincount(indices, weights=weights2, minlength=len(prepared_data1))
cdf2 = compute_cdf(prepared_weights2)
return numpy.average(numpy.abs(cdf1 - cdf2) ** power, weights=prepared_weights1)
# endregion
| apache-2.0 |
yyjiang/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
equialgo/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
research-team/NEUCOGAR | NEST/misc/old/main.py | 2 | 23252 | from matplotlib import patches
import numpy as np
import pickle
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import RecurrentNetwork, FeedForwardNetwork
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
from pybrain.supervised import BackpropTrainer
import matplotlib.pyplot as plt
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml import NetworkWriter, NetworkReader
from image_processing import get_cat_dog_trainset, get_cat_dog_testset
from neuromodulation.connection import NMConnection
import root
def generateTrainingData(size=10000, saveAfter = False):
"""
Creates a set of training data with 4-dimensional input and 2-dimensional output
with `size` samples
"""
np.random.seed()
data = SupervisedDataSet(4,2)
for i in xrange(int(size/2)):
[a, b] = np.random.random_integers(1, 100, 2)
[c, d] = np.random.random_integers(100, 500, 2)
data.addSample((a, b, c, d), (-1, 1))
for i in xrange(int(size/2)):
[a, b] = np.random.random_integers(100, 500, 2)
[c, d] = np.random.random_integers(1, 100, 2)
data.addSample((a, b, c, d), (1, -1))
if saveAfter:
data.saveToFile(root.path()+"/res/dataSet")
return data
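# Illustrative usage (assumed, mirrors how run() below consumes a dataset):
#   d = generateTrainingData(size=1000)
#   for inp, targ in d:
#       print inp, targ      # e.g. [ 12.  87. 342. 171.] [-1.  1.]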
def getDatasetFromFile(path = "/res/dataSet"):
return SupervisedDataSet.loadFromFile(path)
# def getRecNetFromFile(path):
def exportANN(net, fileName = root.path()+"/res/recANN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importANN(fileName = root.path()+"/res/recANN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
def exportRNN(net, fileName = root.path()+"/res/recRNN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importRNN(fileName = root.path()+"/res/recRNN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
def exportRFCNN(net, fileName = root.path()+"/res/recRFCNN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importRFCNN(fileName = root.path()+"/res/recRFCNN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
def exportCatDogANN(net, fileName = root.path()+"/res/cat_dog_params"):
arr = net.params
np.save(fileName, arr)
def exportCatDogRNN(net, fileName = root.path()+"/res/cat_dog_nm_params"):
# arr = net.params
# np.save(fileName, arr)
# fileObject = open(fileName+'.pickle', 'w')
# pickle.dump(net, fileObject)
# fileObject.close()
NetworkWriter.writeToFile(net, fileName+'.xml')
def exportCatDogRFCNN(net, fileName = root.path()+"/res/cat_dog_fc_params"):
# arr = net.params
# np.save(fileName, arr)
# fileObject = open(fileName+'.pickle', 'w')
# pickle.dump(net, fileObject)
# fileObject.close()
NetworkWriter.writeToFile(net, fileName+'.xml')
def importCatDogANN(fileName = root.path()+"/res/recCatDogANN"):
n = FeedForwardNetwork()
n.addInputModule(LinearLayer(7500, name='in'))
n.addModule(SigmoidLayer(9000, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
params = np.load(root.path()+'/res/cat_dog_params.txt.npy')
n._setParameters(params)
return n
def importCatDogRNN(fileName = root.path()+"/res/recCatDogANN"):
n = NetworkReader.readFrom(root.path()+"/res/cat_dog_nm_params.xml")
return n
def trainedRNN():
n = RecurrentNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
# n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
n.sortModules()
draw_connections(n)
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count += 1
if count == 50:
return trainedRNN()
# exportRNN(n)
draw_connections(n)
return n
def trainedANN():
n = FeedForwardNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
draw_connections(n)
# d = generateTrainingData()
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
# FIXME: I'm not sure the recurrent ANN is going to converge
# so just training for fixed number of epochs
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count += 1
if count == 20:
return trainedANN()
exportANN(n)
draw_connections(n)
return n
#return trained recurrent full connected neural network
def trainedRFCNN():
n = RecurrentNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
draw_connections(n)
# d = generateTrainingData()
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
# FIXME: I'm not sure the recurrent ANN is going to converge
# so just training for fixed number of epochs
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count = count + 1
if (count == 100):
return trainedRFCNN()
# for i in range(100):
# print t.train()
exportRFCNN(n)
draw_connections(n)
return n
def draw_connections(net):
for mod in net.modules:
print "Module:", mod.name
if mod.paramdim > 0:
print "--parameters:", mod.params
for conn in net.connections[mod]:
print "-connection to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
if hasattr(net, "recurrentConns"):
print "Recurrent connections"
for conn in net.recurrentConns:
print "-", conn.inmod.name, " to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
def initial_with_zeros(net):
zeros = ([10.0]*len(net.params))
net._setParameters(zeros)
def draw_graphics(net, path_net = None):
red_patch = patches.Patch(color='red', label='First neuron')
blue_patch = patches.Patch(color='blue', label='Second neuron')
orange_patch = patches.Patch(color='orange', label='Both neurons')
black_patch = patches.Patch(color='black', label='Neither')
path = path_net + 'h;h;x;y/'
k = 0
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+str(value1)+","+str(value2)+","+"x"+","+"y"+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([value1, value2, i, j])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
path = path_net + 'h;x;h;y/'
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+str(value1)+","+"x"+","+str(value2)+","+"y"+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([value1, i, value2, j])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
path = path_net + 'h;x;y;h/'
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+str(value1)+","+"x"+","+"y"+","+str(value2)+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([value1, i, j, value2])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
path = path_net + 'x;h;y;h/'
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+"x"+","+str(value1)+","+"y"+","+str(value2)+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([i, value1, j, value2])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
path = path_net + 'x;y;h;h/'
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+"x"+","+"y"+","+str(value1)+","+str(value2)+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([i, j, value1, value2])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
path = path_net + 'x;h;h;y/'
for value1 in [50, 100, 150]:
for value2 in [50, 100, 150]:
k = k + 1
plt.figure(k)
# plt.title("["+str(value)+",50"+",x,"+"y"+"]")
title = "["+"x"+","+str(value1)+","+str(value2)+","+"y"+"]"
plt.title(title)
for i in range(50,500, 5):
print k," ",i
for j in range(50, 500, 5):
activation = net.activate([i, value1, value2, j])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
else:
# activation[0] == np.float32(0.0) and activation[1] == np.float32(0.0):
color = 'black'
x = i
y = j
plt.scatter(x,y,c=color,s = 20, alpha=0.9, edgecolor = 'none')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
plt.savefig(path + title + '.png')
# plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
# plt.show()
def calculateCapacity(net):
count1st = 0
count2nd = 0
both = 0
neither = 0
total = 0
for x1 in range(0, 500, 20):
for x2 in range(0, 500, 20):
for x3 in range(0, 500, 20):
for x4 in range(0, 500, 20):
activation = net.activate([x1, x2, x3, x4])
if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
color = 'red'
count1st += 1
total += 1
elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'blue'
count2nd += 1
total += 1
elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
color = 'orange'
both += 1
total += 1
else:
color = 'black'
neither += 1
total += 1
print 'iteration: ', x1
count1st = float(count1st)*100/float(total)
count2nd = float(count2nd)*100/float(total)
neither = float(neither)*100/float(total)
both = float(both)*100/float(total)
print '1st: ', count1st
print '2nd: ', count2nd
print 'neither: ', neither
print 'both', both
return count1st, count2nd, both, neither
def trained_cat_dog_ANN():
n = FeedForwardNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
n.convertToFastNetwork()
print 'successful converted to fast network'
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogANN(n)
return n
def trained_cat_dog_RNN():
n = RecurrentNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(NMConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogRNN(n)
return n
def trained_cat_dog_RFCNN():
n = RecurrentNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogRFCNN(n)
return n
def get_class(arr):
len_arr = len(arr)
for i in range(len_arr):
if arr[i] > 0:
arr[i] = 1
else:
arr[i] = 0
return arr
def run():
# n = trainedANN()
# n1 = importANN()
total_first = []
total_second = []
total_both = []
total_neither = []
for i in range(10):
n2 = trainedRNN()
res = calculateCapacity(n2)
total_first.append(res[0])
total_second.append(res[1])
total_both.append(res[2])
total_neither.append(res[3])
print 'first: mean', np.mean(total_first), 'variance', np.var(total_first)
print 'second: mean', np.mean(total_second), 'variance', np.var(total_second)
print 'both: mean', np.mean(total_both), 'variance', np.var(total_both)
print 'neither: mean', np.mean(total_neither), 'variance', np.var(total_neither)
exit()
# n2 = importRNN()
# n = trainedRFCNN()
# n3 = importRFCNN()
# draw_graphics(n1, path_net=root.path() + '/Graphics/ANN/')
# draw_graphics(n2, path_net=root.path() + '/Graphics/RNMNN/')
# draw_graphics(n3, path_net=root.path() + '/Graphics/RFCNN/')
# calculateCapacity(n1)
# calculateCapacity(n3)
exit()
# print 'ann:'
# for x in [(1, 15, 150, 160), (1, 15, 150, 160),
# (100, 110, 150, 160), (150, 160, 10, 15),
# (150, 160, 10, 15), (200, 200, 100, 100),
# (10, 15, 300, 250), (250, 300, 15, 10)]:
# print("n.activate(%s) == %s\n" % (x, n.activate(x)))
# calculateCapacity(n)
# draw_graphics(n)
print "hello"
n = importCatDogANN()
# exit()
# n = importCatDogRFCNN()
# NetworkWriter.writeToFile(n, root.path()+'/res/text.xml')
# n = NetworkReader.readFrom(root.path()+'/res/text.xml')
print type(n)
# exit()
ds = get_cat_dog_testset()
for inp, targ in ds:
activate = n.activate(inp)
print "activate:", activate, "expected:", targ
# draw_graphics(n)
# n = 4
# print np.random.random_integers(0, 1, n)
# exit()
# generateTrainingData(saveAfter=True)
if __name__ == "__main__":
run()
"""
RNN(neuromodulation):
1st: 3095
2nd: 2643229
neither: 28162
both 3575514
RNN(neuromodulation new)
1st: 3533955
2nd: 1977645
neither: 0
both 738400
ANN:
1st: 9803
2nd: 46325
neither: 425659
both 5768213
Recurrent fully connected neural network
1st: 504753
2nd: 555727
neither: 1768
both 5187752
""" | gpl-2.0 |
void32/mpld3 | mpld3/plugins.py | 5 | 25069 | """
Plugins to add behavior to mpld3 charts
=======================================
Plugins are means of adding additional javascript features to D3-rendered
matplotlib plots. A number of plugins are defined here; it is also possible
to create nearly any imaginable behavior by defining your own custom plugin.
"""
__all__ = ['connect', 'clear', 'get_plugins', 'PluginBase',
'Reset', 'Zoom', 'BoxZoom',
'PointLabelTooltip', 'PointHTMLTooltip', 'LineLabelTooltip',
'MousePosition']
import collections
import json
import uuid
import matplotlib
from .utils import get_id
def get_plugins(fig):
"""Get the list of plugins in the figure"""
connect(fig)
return fig.mpld3_plugins
def connect(fig, *plugins):
"""Connect one or more plugins to a figure
Parameters
----------
fig : matplotlib Figure instance
The figure to which the plugins will be connected
*plugins :
Additional arguments should be plugins which will be connected
to the figure.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), '-k')
>>> plugins.connect(fig, plugins.LineLabelTooltip(lines[0]))
"""
if not isinstance(fig, matplotlib.figure.Figure):
raise ValueError("plugins.connect: first argument must be a figure")
if not hasattr(fig, 'mpld3_plugins'):
fig.mpld3_plugins = DEFAULT_PLUGINS[:]
for plugin in plugins:
fig.mpld3_plugins.append(plugin)
def clear(fig):
"""Clear all plugins from the figure, including defaults"""
fig.mpld3_plugins = []
class PluginBase(object):
def get_dict(self):
return self.dict_
def javascript(self):
if hasattr(self, "JAVASCRIPT"):
if hasattr(self, "js_args_"):
return self.JAVASCRIPT.render(self.js_args_)
else:
return self.JAVASCRIPT
else:
return ""
def css(self):
if hasattr(self, "css_"):
return self.css_
else:
return ""
class Reset(PluginBase):
"""A Plugin to add a reset button"""
dict_ = {"type": "reset"}
class MousePosition(PluginBase):
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MousePosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt=".3g"):
self.dict_ = {"type": "mouseposition",
"fontsize": fontsize,
"fmt": fmt}
class Zoom(PluginBase):
"""A Plugin to add zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class BoxZoom(PluginBase):
"""A Plugin to add box-zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "boxzoom",
"button": button,
"enabled": enabled}
class PointLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : array or None
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, PointLabelTooltip(points[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LineLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
        If supplied, specify the label for the line. If not supplied, the
        (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), 'o')
>>> plugins.connect(fig, LineLabelTooltip(lines[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, label=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
self.dict_ = {"type": "tooltip",
"id": get_id(points),
"labels": label if label is None else [label],
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LinkedBrush(PluginBase):
"""A Plugin to enable linked brushing between plots
Parameters
----------
points : matplotlib Collection or Line2D object
A representative of the scatter plot elements to brush.
button : boolean, optional
        if True (default), then add a button to enable/disable the brushing behavior
    enabled : boolean, optional
        specify whether brushing should be enabled by default. default=True.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> X = np.random.random((3, 100))
>>> fig, ax = plt.subplots(3, 3)
>>> for i in range(2):
... for j in range(2):
... points = ax[i, j].scatter(X[i], X[j])
>>> plugins.connect(fig, LinkedBrush(points))
>>> fig_to_html(fig)
Notes
-----
Notice that in the above example, only one of the four sets of points is
passed to the plugin. This is all that is needed: for the sake of efficient
data storage, mpld3 keeps track of which plot objects draw from the same
data.
Also note that for the linked brushing to work correctly, the data must
not contain any NaNs. The presence of NaNs makes the different data views
have different sizes, so that mpld3 is unable to link the related points.
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrush",
"button": button,
"enabled": enabled,
"id": get_id(points, suffix)}
class PointHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
    formatted text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, PointHTMLTooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "htmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
class LineHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
    formatted text which hovers over points.
Parameters
----------
points : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
The label for the line, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10))
>>> label = '<h1>line {title}</h1>'.format(title='A')
>>> plugins.connect(fig, LineHTMLTooltip(lines[0], label))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("linehtmltooltip", LineHTMLTooltip);
LineHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
LineHTMLTooltip.prototype.constructor = LineHTMLTooltip;
LineHTMLTooltip.prototype.requiredProps = ["id"];
LineHTMLTooltip.prototype.defaultProps = {label:null,
hoffset:0,
voffset:10};
function LineHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
var label = this.props.label
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(label)
.style("visibility", "visible");
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");})
};
"""
def __init__(self, line, label=None,
hoffset=0, voffset=10,
css=None):
self.line = line
self.label = label
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
self.dict_ = {"type": "linehtmltooltip",
"id": get_id(line),
"label": label,
"hoffset": hoffset,
"voffset": voffset}
class InteractiveLegendPlugin(PluginBase):
"""A plugin for an interactive legends.
Inspired by http://bl.ocks.org/simzou/6439398
Parameters
----------
plot_elements : iterable of matplotlib elements
the elements to associate with a given legend items
labels : iterable of strings
The labels for each legend element
ax : matplotlib axes instance, optional
the ax to which the legend belongs. Default is the first
axes. The legend will be plotted to the right of the specified
axes
alpha_unsel : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is unselected.
Default is 0.2
alpha_over : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is overlaid.
        Default is 1 (no effect); 1.5 works nicely.
    start_visible : boolean, optional
        defines whether objects should start selected or not.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> N_paths = 5
>>> N_steps = 100
>>> x = np.linspace(0, 10, 100)
>>> y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
>>> y = y.cumsum(1)
>>> fig, ax = plt.subplots()
>>> labels = ["a", "b", "c", "d", "e"]
>>> line_collections = ax.plot(x, y.T, lw=4, alpha=0.6)
>>> interactive_legend = plugins.InteractiveLegendPlugin(line_collections,
... labels,
... alpha_unsel=0.2,
... alpha_over=1.5,
... start_visible=True)
>>> plugins.connect(fig, interactive_legend)
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_unsel":0.2,
"alpha_over":1.0,
"start_visible":true}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_unsel = this.props.alpha_unsel;
var alpha_over = this.props.alpha_over;
var start_visible = this.props.start_visible;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
// mpld3_element might be null in case of Line2D instances
// for we pass the id for both the line and the markers. Either
// one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
            obj.visible = start_visible; // should eventually be settable from the python side
legendItems.push(obj);
set_alphas(obj, false);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height", 10)
.attr("width", 25)
.attr("x", ax.width + ax.position[0] + 25)
.attr("y",function(d,i) {
return ax.position[1] + i * 25 + 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click).on('mouseover', over).on('mouseout', out);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width + ax.position[0] + 25 + 40;})
.attr("y", function(d,i) {
return ax.position[1] + i * 25 + 10 + 10 - 1;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
set_alphas(d, false);
};
// specify the action on legend overlay
function over(d,i){
set_alphas(d, true);
};
// specify the action on legend overlay
function out(d,i){
set_alphas(d, false);
};
// helper function for setting alphas
function set_alphas(d, is_over){
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
var current_alpha = d.mpld3_elements[i].props.alpha;
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("stroke-width", is_over ?
alpha_over * d.mpld3_elements[i].props.edgewidth : d.mpld3_elements[i].props.edgewidth);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
var current_alpha = d.mpld3_elements[i].props.alphas[0];
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("fill-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel));
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
css_ = """
.legend-box {
cursor: pointer;
}
"""
def __init__(self, plot_elements, labels, ax=None,
alpha_unsel=0.2, alpha_over=1., start_visible=True):
self.ax = ax
if ax:
ax = get_id(ax)
mpld3_element_ids = self._determine_mpld3ids(plot_elements)
self.mpld3_element_ids = mpld3_element_ids
self.dict_ = {"type": "interactive_legend",
"element_ids": mpld3_element_ids,
"labels": labels,
"ax": ax,
"alpha_unsel": alpha_unsel,
"alpha_over": alpha_over,
"start_visible": start_visible}
def _determine_mpld3ids(self, plot_elements):
"""
Helper function to get the mpld3_id for each
of the specified elements.
"""
mpld3_element_ids = []
# There are two things being done here. First,
# we make sure that we have a list of lists, where
# each inner list is associated with a single legend
# item. Second, in case of Line2D object we pass
# the id for both the marker and the line.
# on the javascript side we filter out the nulls in
# case either the line or the marker has no equivalent
# D3 representation.
for entry in plot_elements:
ids = []
if isinstance(entry, collections.Iterable):
for element in entry:
mpld3_id = get_id(element)
ids.append(mpld3_id)
if isinstance(element, matplotlib.lines.Line2D):
mpld3_id = get_id(element, 'pts')
ids.append(mpld3_id)
else:
ids.append(get_id(entry))
if isinstance(entry, matplotlib.lines.Line2D):
mpld3_id = get_id(entry, 'pts')
ids.append(mpld3_id)
mpld3_element_ids.append(ids)
return mpld3_element_ids
DEFAULT_PLUGINS = [Reset(), Zoom(), BoxZoom()]
| bsd-3-clause |
avmarchenko/exatomic | exatomic/gaussian/tests/test_output.py | 3 | 7007 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam1.atom)))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam2.atom)))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(self.mam1.basis_set)))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(self.mam2.basis_set)))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam1.orbital)))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam2.orbital)))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(self.mam1.momatrix)))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(self.mam2.momatrix)))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam1.basis_set_order)))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam2.basis_set_order)))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam1.frame)))
self.mam2.parse_frame()
self.assertEqual(self.mam2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam2.frame)))
def test_to_universe(self):
"""Test the to_universe method."""
mam1 = self.mam1.to_universe(ignore=True)
mam2 = self.mam2.to_universe(ignore=True)
for uni in [mam1, mam2]:
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
class TestOutput(TestCase):
"""
This test ensures that the parsing functionality works on
a smattering of output files that were generated with the
Gaussian software package. Target syntax is for Gaussian
09.
"""
def setUp(self):
# TODO : add some cartesian basis set files
# a geometry optimization and
# maybe properties? like the frequency
# and tddft calcs
self.uo2 = Output(resource('g09-uo2.out'))
self.mam3 = Output(resource('g09-ch3nh2-631g.out'))
self.mam4 = Output(resource('g09-ch3nh2-augccpvdz.out'))
def test_parse_atom(self):
self.uo2.parse_atom()
self.assertEqual(self.uo2.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(self.uo2.atom)))
self.mam3.parse_atom()
self.assertEqual(self.mam3.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam3.atom)))
self.mam4.parse_atom()
self.assertEqual(self.mam4.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam4.atom)))
def test_parse_basis_set(self):
self.uo2.parse_basis_set()
self.assertEqual(self.uo2.basis_set.shape[0], 49)
self.assertTrue(np.all(pd.notnull(self.uo2.basis_set)))
self.mam3.parse_basis_set()
self.assertEqual(self.mam3.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(self.mam3.basis_set)))
self.mam4.parse_basis_set()
self.assertEqual(self.mam4.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(self.mam4.basis_set)))
def test_parse_orbital(self):
self.uo2.parse_orbital()
self.assertEqual(self.uo2.orbital.shape[0], 141)
self.assertTrue(np.all(pd.notnull(self.uo2.orbital)))
self.mam3.parse_orbital()
self.assertEqual(self.mam3.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam3.orbital)))
self.mam4.parse_orbital()
self.assertEqual(self.mam4.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam4.orbital)))
def test_parse_momatrix(self):
self.uo2.parse_momatrix()
self.assertEqual(self.uo2.momatrix.shape[0], 19881)
self.assertTrue(np.all(pd.notnull(self.uo2.momatrix)))
self.mam3.parse_momatrix()
self.assertEqual(self.mam3.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(self.mam3.momatrix)))
self.mam4.parse_momatrix()
self.assertEqual(self.mam4.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(self.mam4.momatrix)))
def test_parse_basis_set_order(self):
self.uo2.parse_basis_set_order()
self.assertEqual(self.uo2.basis_set_order.shape[0], 141)
self.assertTrue(np.all(pd.notnull(self.uo2.basis_set_order)))
self.mam3.parse_basis_set_order()
self.assertEqual(self.mam3.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam3.basis_set_order)))
self.mam4.parse_basis_set_order()
self.assertEqual(self.mam4.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam4.basis_set_order)))
def test_parse_frame(self):
self.uo2.parse_frame()
self.assertEqual(self.uo2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.uo2.frame)))
self.mam3.parse_frame()
self.assertEqual(self.mam3.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam3.frame)))
self.mam4.parse_frame()
self.assertEqual(self.mam4.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam4.frame)))
def test_to_universe(self):
"""Test the to_universe method."""
uo2 = self.uo2.to_universe(ignore=True)
mam3 = self.mam3.to_universe(ignore=True)
for uni in [uo2, mam3]:
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
| apache-2.0 |
Adai0808/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
ryanbressler/pydec | pydec/vis/draw.py | 6 | 5172 | __all__ = ['triplot','lineplot','lineplot2','cube_quivers','simplex_quivers']
try:
import matplotlib.collections, matplotlib.pylab
except ImportError:
import warnings
warnings.warn("matplotlib not installed, some loss of functionality will result")
from scipy import rand,asarray,zeros,empty,average
from pydec import barycentric_gradients,combinations,Simplex
import numpy
def triplot(vertices, indices, labels=False):
"""
Plot a 2D triangle mesh
"""
vertices,indices = asarray(vertices),asarray(indices)
#3d tensor [triangle index][vertex index][x/y value]
triangles = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],3,2))
col = matplotlib.collections.PolyCollection(triangles)
col.set_facecolor('grey')
col.set_alpha(0.5)
col.set_linewidth(1)
#sub = subplot(111)
sub = matplotlib.pylab.gca()
sub.add_collection(col,autolim=True)
matplotlib.pylab.axis('off')
sub.autoscale_view()
if labels:
barycenters = numpy.average(triangles,axis=1)
for n,bc in enumerate(barycenters):
matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
'horizontalalignment' : 'center',
'verticalalignment' : 'center'
})
#matplotlib.pylab.show()
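## Example usage for triplot (illustrative sketch; the vertices and triangle
## indices below are arbitrary demonstration values):
##from numpy import array
##v = array([[0.,0.],[1.,0.],[0.,1.],[1.,1.]])
##t = array([[0,1,2],[1,3,2]])
##triplot(v, t, labels=True)
##matplotlib.pylab.show()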
def lineplot2(tails,heads,labels=False,linewidths=1):
#vertices,indices = asarray(vertices),asarray(indices)
#3d tensor [segment index][vertex index][x/y value]
#lines = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],2,2))
data = empty((len(tails),2,2))
data[:,0,:] = tails
data[:,1,:] = heads
col = matplotlib.collections.LineCollection(data)
col.set_color('k')
col.set_linewidth(linewidths)
#sub = subplot(111)
sub = matplotlib.pylab.gca()
sub.add_collection(col,autolim=True)
matplotlib.pylab.axis('off')
sub.autoscale_view()
if labels:
        barycenters = numpy.average(data,axis=1)
for n,bc in enumerate(barycenters):
matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
'horizontalalignment' : 'center',
'verticalalignment' : 'center'
})
#matplotlib.pylab.show()
def lineplot(vertices,indices,labels=False,linewidths=1):
"""
Plot 2D line segments
"""
vertices,indices = asarray(vertices),asarray(indices)
#3d tensor [segment index][vertex index][x/y value]
lines = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],2,2))
col = matplotlib.collections.LineCollection(lines)
col.set_color('k')
col.set_linewidth(linewidths)
#sub = subplot(111)
sub = matplotlib.pylab.gca()
sub.add_collection(col,autolim=True)
matplotlib.pylab.axis('off')
sub.autoscale_view()
if labels:
barycenters = numpy.average(lines,axis=1)
for n,bc in enumerate(barycenters):
matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
'horizontalalignment' : 'center',
'verticalalignment' : 'center'
})
#matplotlib.pylab.show()
def cube_quivers(cmplx,vals):
N = cmplx.complex_dimension()
quiver_dirs = zeros((cmplx[2].cube_array.shape[0],N))
edge_to_face = cmplx[2].boundary.T.tocsr()
edge_to_face.data = numpy.abs(edge_to_face.data)
num_edges = cmplx[1].cube_array.shape[0]
for i in range(N):
i_edges = (cmplx[1].cube_array[:,-1] == i)
i_vals = zeros(num_edges)
i_vals[i_edges] = vals[i_edges]
quiver_dirs[:,i] = 0.5*(edge_to_face*i_vals)
quiver_bases = cmplx[2].cube_array[:,:N] + 0.5
return quiver_bases,quiver_dirs
def simplex_quivers(sc,form):
"""
Sample a Whitney 1-form at simplex barycenters
"""
quiver_bases = average(sc.vertices[sc[-1].simplices],axis=1)
quiver_dirs = zeros((sc[-1].num_simplices,sc.embedding_dimension()))
s_to_i = sc[1].simplex_to_index
for n,s in enumerate(sc[-1].simplices):
verts = sorted(s)
d_lambda = barycentric_gradients(sc.vertices[verts,:])
edges = [Simplex(x) for x in combinations(s,2)]
indices = [s_to_i[x] for x in edges]
values = [form[i] for i in indices]
for e,v in zip(combinations(range(len(verts)),2),values):
quiver_dirs[n,:] += v*(d_lambda[e[1]] - d_lambda[e[0]])
quiver_dirs /= (sc.complex_dimension() + 1)
return quiver_bases,quiver_dirs
##from scipy import *
##from pydec import *
##from pylab import quiver,show
##v = array([[0,0],[1,0],[0,1]])
##s = array([[0,1,2]])
##sc = SimplicialComplex(v,s)
##b,d = simplex_quivers(sc,array([1.0,0.0,0.0]))
##quiver(b[:,0],b[:,1],d[:,0],d[:,1])
##show()
| bsd-3-clause |
akidhruv/Computational_Cauldron | PYTHON/Navier_Stokes/plot.py | 1 | 3039 | import matplotlib.pyplot as plt
import numpy as np
data1=np.loadtxt('LidData200.dat')
data2=np.loadtxt('LidData201.dat')
data3=np.loadtxt('LidData202.dat')
data4=np.loadtxt('LidData203.dat')
data5=np.loadtxt('LidData204.dat')
data6=np.loadtxt('LidData205.dat')
data7=np.loadtxt('LidData206.dat')
data8=np.loadtxt('LidData207.dat')
M=32+1
N=4
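# N = number of rows read from each block data file; M = 32+1 grid points per row.
# The eight block files loaded above are unpacked row by row below and then
# stitched together with np.concatenate into full-domain X, Y, U, V arrays.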
x1=np.zeros((N,M),dtype=float)
y1=np.zeros((N,M),dtype=float)
u1=np.zeros((N,M),dtype=float)
v1=np.zeros((N,M),dtype=float)
x2=np.zeros((N,M),dtype=float)
y2=np.zeros((N,M),dtype=float)
u2=np.zeros((N,M),dtype=float)
v2=np.zeros((N,M),dtype=float)
x3=np.zeros((N,M),dtype=float)
y3=np.zeros((N,M),dtype=float)
u3=np.zeros((N,M),dtype=float)
v3=np.zeros((N,M),dtype=float)
x4=np.zeros((N,M),dtype=float)
y4=np.zeros((N,M),dtype=float)
u4=np.zeros((N,M),dtype=float)
v4=np.zeros((N,M),dtype=float)
X=np.zeros((M,M),dtype=float)
Y=np.zeros((M,M),dtype=float)
U=np.zeros((M,M),dtype=float)
V=np.zeros((M,M),dtype=float)
x5=np.zeros((N,M),dtype=float)
y5=np.zeros((N,M),dtype=float)
u5=np.zeros((N,M),dtype=float)
v5=np.zeros((N,M),dtype=float)
x6=np.zeros((N,M),dtype=float)
y6=np.zeros((N,M),dtype=float)
u6=np.zeros((N,M),dtype=float)
v6=np.zeros((N,M),dtype=float)
x7=np.zeros((N,M),dtype=float)
y7=np.zeros((N,M),dtype=float)
u7=np.zeros((N,M),dtype=float)
v7=np.zeros((N,M),dtype=float)
x8=np.zeros((N,M),dtype=float)
y8=np.zeros((N,M),dtype=float)
u8=np.zeros((N,M),dtype=float)
v8=np.zeros((N,M),dtype=float)
k=0
for i in range(N):
for j in range(M):
x1[i,j]=data1[k,0]
y1[i,j]=data1[k,1]
u1[i,j]=data1[k,2]
v1[i,j]=data1[k,3]
x2[i,j]=data2[k,0]
y2[i,j]=data2[k,1]
u2[i,j]=data2[k,2]
v2[i,j]=data2[k,3]
x3[i,j]=data3[k,0]
y3[i,j]=data3[k,1]
u3[i,j]=data3[k,2]
v3[i,j]=data3[k,3]
x4[i,j]=data4[k,0]
y4[i,j]=data4[k,1]
u4[i,j]=data4[k,2]
v4[i,j]=data4[k,3]
x5[i,j]=data5[k,0]
y5[i,j]=data5[k,1]
u5[i,j]=data5[k,2]
v5[i,j]=data5[k,3]
x6[i,j]=data6[k,0]
y6[i,j]=data6[k,1]
u6[i,j]=data6[k,2]
v6[i,j]=data6[k,3]
x7[i,j]=data7[k,0]
y7[i,j]=data7[k,1]
u7[i,j]=data7[k,2]
v7[i,j]=data7[k,3]
x8[i,j]=data8[k,0]
y8[i,j]=data8[k,1]
u8[i,j]=data8[k,2]
v8[i,j]=data8[k,3]
k=k+1
X=np.concatenate((x1,x2,x3,x4,x5,x6,x7,x8))
Y=np.concatenate((y1,y2,y3,y4,y5,y6,y7,y8))
U=np.concatenate((u1,u2,u3,u4,u5,u6,u7,u8))
V=np.concatenate((v1,v2,v3,v4,v5,v6,v7,v8))
plt.figure()
plt.title('Re = 1000, Grid = 100 x 100')
#plt.contour(X,Y,np.sqrt(U**2+V**2),density=5)
plt.streamplot(X,Y,U,V,density=5)
plt.plot(X[:,0],Y[:,0],'k')
plt.plot(X[:,-1],Y[:,-1],'k')
plt.plot(X[0,:],Y[0,:],'k')
plt.plot(X[-1,:],Y[-1,:],'k')
#plt.ylim(-0.2,1.2)
#plt.xlim(-0.2,2.2)
plt.axis('equal')
plt.show()
| mit |
sideshownick/Snaking_Networks | Oscillators/run_delaytimes.py | 1 | 2615 | import MutInf as mi
import numpy as np
fname='2015-09-09_flipflop.txt'
d1=mi.import_data(fname)
ts=5000.0 #points/second
#stt=1.6
#end=2.0
stt=0.0 #start time
end=60.0 #end time
# timestamps
t0=d1[0, int(stt*ts):int(end*ts)]
# define data vectors/arrays
x1 = d1[2, int(stt*ts):int(end*ts)] #time series data from osc_1
x2 = d1[4, int(stt*ts):int(end*ts)] #time series data from osc_2
x3 = d1[6, int(stt*ts):int(end*ts)] #time series data from osc_3
from matplotlib import pyplot as plt
#smooth by averaging over N points
N=200
t=np.zeros(len(t0)/N)
y1=np.zeros(len(t0)/N)
y2=np.zeros(len(t0)/N)
y3=np.zeros(len(t0)/N)
for i in range(len(t0)/N):
t[i] = np.mean(t0[i*N:i*N+N])
y1[i] = np.mean(x1[i*N:i*N+N])
#c1=(4+y1[i])/8.
#ax1.plot(t[i], 0, 's', color=(1.-c1, c1, 0), markersize=ms)
y2[i] = np.mean(x2[i*N:i*N+N])
#c2=(4+y2[i])/8.
#ax1.plot(t[i], 1, 's', color=(1.-c2, c2, 0), markersize=ms)
y3[i] = np.mean(x3[i*N:i*N+N])
#c3=(4+y3[i])/8.
#ax1.plot(t[i], 2, 's', color=(1.-c3, c3, 0), markersize=ms)
bins=(2,2,2)
delay=1
fig1=plt.figure(figsize=(18,6))
ax1=fig1.add_subplot(111)
#ms=100
mindelay=0.1 #seconds
maxdelay=10.0 #seconds
delaystep=0.1 #seconds
delays=range(int(mindelay*ts/N),int(maxdelay*ts/N),int(delaystep*ts/N))
#print delays
M12=np.zeros(len(delays))
M21=np.zeros(len(delays))
M23=np.zeros(len(delays))
M32=np.zeros(len(delays))
M31=np.zeros(len(delays))
M13=np.zeros(len(delays))
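# For each candidate delay, estimate the mutual information in both directions for
# every oscillator pair; the pairwise differences (e.g. M12 - M21) plotted below
# serve as a net directionality (transfer-entropy-like) measure.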
for i in range(len(delays)):
delay=delays[i]
M12[i] = mi.MutInf(y1, y2, Nbins=bins, s=delay)
M21[i] = mi.MutInf(y2, y1, Nbins=bins, s=delay)
M23[i] = mi.MutInf(y2, y3, Nbins=bins, s=delay)
M32[i] = mi.MutInf(y3, y2, Nbins=bins, s=delay)
M31[i] = mi.MutInf(y3, y1, Nbins=bins, s=delay)
M13[i] = mi.MutInf(y1, y3, Nbins=bins, s=delay)
#print np.array(delays)/ts, M12-M21
ax1.plot(np.array(delays)/ts*N, M12-M21, 'o-', label='1->2')
#ax1.plot(delays, M21, label='M21')
ax1.plot(np.array(delays)/ts*N, M23-M32, 'o-', label='2->3')
#ax1.plot(delays, M32, label='M32')
ax1.plot(np.array(delays)/ts*N, M31-M13, 'o-', label='3->1')
ax1.plot([mindelay,maxdelay], [0,0], '-')
#ax1.plot(delays, M13, label='M13')
ax1.set_xlabel("delay time (s)")
ax1.set_ylabel("Difference between TE measures")
ax1.legend()
#a,b,c,d=ax1.axis()
#ax1.axis([a,b,-1,3])
plt.savefig("test.png")
fig2=plt.figure(figsize=(18,6))
ax2=fig2.add_subplot(111)
ax2.plot(t, y1, '.', label="O1")
ax2.plot(t, y2, '.', label="O2")
ax2.plot(t, y3, '.', label="O3")
ax2.set_xlabel("time (s)")
ax2.set_ylabel("state of Nth Oscillator")
ax2.legend()
plt.savefig("test1.png")
| gpl-2.0 |
klocey/Emergence | tools/DiversityTools/mete/mete.py | 10 | 40946 | """Module for fitting and testing Harte et al.'s maximum entropy models
Terminology and notation follows Harte (2011)
"""
from __future__ import division
from math import log, exp, isnan, floor, ceil, factorial
import os.path
import sys
import cPickle
import numpy as np
import matplotlib.pyplot as plt
#import mpmath as mp
from scipy.optimize import bisect, fsolve
from scipy.stats import logser, geom
from numpy.random import random_integers
from random import uniform
from numpy import array, e, empty
def trunc_logser_pmf(x, p, upper_bound):
"""Probability mass function for the upper truncated log-series
Parameters
----------
x : array_like
Values of `x` for which the pmf should be determined
p : float
Parameter for the log-series distribution
upper_bound : float
Upper bound of the distribution
Returns
-------
pmf : array
Probability mass function for each value of `x`
"""
if p < 1:
return logser.pmf(x, p) / logser.cdf(upper_bound, p)
else:
x = np.array(x)
ivals = np.arange(1, upper_bound + 1)
normalization = sum(p ** ivals / ivals)
pmf = (p ** x / x) / normalization
return pmf
def trunc_logser_cdf(x_max, p, upper_bound):
"""Cumulative probability function for the upper truncated log-series"""
if p < 1:
#If we can just renormalize the untruncated cdf do so for speed
return logser.cdf(x_max, p) / logser.cdf(upper_bound, p)
else:
x_list = range(1, int(x_max) + 1)
cdf = sum(trunc_logser_pmf(x_list, p, upper_bound))
return cdf
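# Illustrative usage sketch for the truncated log-series helpers above. The
# parameter p and upper bound N0 below are arbitrary values assumed purely for
# demonstration; the cdf evaluated at the upper bound equals 1 by construction.
def _example_trunc_logser(p=0.98, N0=100):
    pmf = trunc_logser_pmf(range(1, N0 + 1), p, N0)
    total = trunc_logser_cdf(N0, p, N0)
    return pmf, total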
def trunc_logser_rvs(p, upper_bound, size):
"""Random variates of the upper truncated log-series
Currently this function only supports random variate generation for p < 1.
This will cover most circumstances, but it is possible to have p >= 1 for
the truncated version of the distribution.
"""
assert p < 1, 'trunc_logser_rvs currently only supports random number generation for p < 1'
size = int(size)
rvs = logser.rvs(p, size=size)
for i in range(0, size):
while(rvs[i] > upper_bound):
rvs[i] = logser.rvs(p, size=1)
return rvs
def get_beta(Svals, Nvals, version='precise', beta_dict={}):
"""Solve for Beta, the sum of the two Lagrange multipliers for R(n, epsilon)
Parameters
----------
Svals : int or array_like
The number of species
Nvals : int or array_like
The total number of individuals
version : {'precise', 'untruncated', 'approx'}, optional
Determine which solution to use to solve for Beta. The default is
'precise', which uses minimal approximations.
        'precise' uses minimal approximations and includes upper truncation of
the distribution at N_0 (eq. 7.27 from Harte et al. 2011)
'untruncated' uses minimal approximations, but assumes that the
distribution of n goes to infinity (eq. B.4 from Harte et al. 2008)
'approx' uses more approximations, but will run substantially faster,
especially for large N (equation 7.30 from Harte 2011)
beta_dict : dict, optional
A dictionary of beta values so that beta can be looked up rather than
solved numerically. This can substantially speed up execution.
Both Svals and Nvals can be vectors to allow calculation of multiple values
of Beta simultaneously. The vectors must be the same length.
Returns
-------
betas : list
beta values for each pair of Svals and Nvals
"""
#Allow both single values and iterables for S and N by converting single values to iterables
if not hasattr(Svals, '__iter__'):
Svals = array([Svals])
else:
Svals = array(Svals)
if not hasattr(Nvals, '__iter__'):
Nvals = array([Nvals])
else:
Nvals = array(Nvals)
assert len(Svals) == len(Nvals), "S and N must have the same length"
assert all(Svals > 1), "S must be greater than 1"
assert all(Nvals > 0), "N must be greater than 0"
assert all(Svals/Nvals < 1), "N must be greater than S"
assert version in ('precise', 'untruncated', 'approx'), "Unknown version provided"
betas = []
for i, S in enumerate(Svals):
N = Nvals[i]
# Set the distance from the undefined boundaries of the Lagrangian multipliers
# to set the upper and lower boundaries for the numerical root finders
BOUNDS = [0, 1]
DIST_FROM_BOUND = 10 ** -15
#If not, solve for beta using the substitution x = e**-beta
if (S, N) in beta_dict:
betas.append(beta_dict[(S, N)])
elif version == 'precise':
n = array(range(1, int(N)+1))
y = lambda x: sum(x ** n / N * S) - sum((x ** n) / n)
exp_neg_beta = bisect(y, BOUNDS[0] + DIST_FROM_BOUND,
min((sys.float_info[0] / S) ** (1 / N), 2),
xtol = 1.490116e-08)
betas.append(-1 * log(exp_neg_beta))
elif version == 'untruncated':
y = lambda x: 1 / log(1 / (1 - x)) * x / (1 - x) - N / S
exp_neg_beta = bisect(y, BOUNDS[0] + DIST_FROM_BOUND,
BOUNDS[1] - DIST_FROM_BOUND)
betas.append(-1 * log(exp_neg_beta))
elif version == 'approx':
y = lambda x: x * log(1 / x) - S / N
betas.append(fsolve(y, 0.0001))
#Store the value in the dictionary to avoid repeating expensive
#numerical routines for the same values of S and N. This is
#particularly important for determining pdfs through mete_distributions.
beta_dict[(S, N)] = betas[-1]
#If only a single pair of S and N values was passed, return a float
if len(betas) == 1:
betas = betas[0]
return betas
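# Illustrative usage sketch for get_beta: S0 and N0 below are arbitrary assumed
# values. With N0 much larger than S0 the three solution versions should agree
# closely; 'approx' is the fastest and 'precise' the most accurate.
def _example_get_beta(S0=20, N0=1000):
    return [get_beta(S0, N0, version=v) for v in ('precise', 'untruncated', 'approx')]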
def get_lambda2(S, N, E):
"""Return lambda_2, the second Lagrangian multiplier for R(n, epsilon)
lambda_2 is calculated using equation 7.26 from Harte 2011.
"""
return S / (E - N)
def get_lambda1(S, N, E, version='precise', beta_dict={}):
"""Return lambda_1, the first Lagrangian multiplier for R(n, epsilon)
lamba_1 is calculated using equation 7.26 from Harte 2011 and get_beta().
"""
beta = get_beta(S, N, version, beta_dict)
return beta - get_lambda2(S, N, E)
def get_dict(filename):
"""Check if lookup dictionary for lamba exists. If not, create an empty one.
Arguments:
filename = is the name of the dictionary file to read from, (e.g., 'beta_lookup_table.pck')
"""
if os.path.exists(filename):
dict_file = open(filename, 'r')
dict_in = cPickle.load(dict_file)
dict_file.close()
print("Successfully loaded lookup table with %s lines" % len(dict_in))
else:
dict_file = open(filename, 'w')
dict_in = {}
cPickle.dump(dict_in, dict_file)
dict_file.close()
print("No lookup table found. Creating an empty table...")
return dict_in
def save_dict(dictionary, filename):
"""Save the current beta lookup table to a file
Arguments:
dictionary = the dictionary object to output
filename = the name of the dictionary file to write to (e.g., 'beta_lookup_table.pck')
"""
dic_output = open(filename, 'w')
cPickle.dump(dictionary, dic_output)
dic_output.close()
def build_beta_dict(S_start, S_end, N_max, N_min=1, filename='beta_lookup_table.pck'):
"""Add values to the lookup table for beta
Starting at S_start and finishing at S_end this function will take values
of N from S + 1 to N_max, determine if a value of beta is already in the
lookup table for the current value of values of S and N, and if not then
calculate the value of beta and add it to the dictionary.
Values are stored for S and N rather than for N/S because the precise form
of the solution (eq. 7.27 in Harte 2011) depends on N as well as N/S due
    to the upper truncation of the distribution at N.
"""
beta_dictionary = get_dict(filename)
for S in range(S_start, S_end + 1):
N_start = max(S + 1, N_min)
for N in range(N_start, N_max):
if (S, N) not in beta_dictionary:
beta_dictionary[(S, N)] = get_beta(S, N)
save_dict(beta_dictionary, filename)
def get_mete_pmf(S0, N0, beta = None):
"""Get the truncated log-series PMF predicted by METE"""
if beta == None:
beta = get_beta(S0, N0)
p = exp(-beta)
truncated_pmf = trunc_logser_pmf(range(1, int(N0) + 1), p, N0)
return truncated_pmf
def get_mete_sad(S0, N0, beta=None, bin_edges=None):
"""Get the expected number of species with each abundance
If no value is provided for beta it will be solved for using S0 & N0
If bin_edges is not provided then the values returned are the estimated
number of species for each integer value from 1 to N0
If bin_edges is provided it should be an array of bin edges including the
bottom and top edges. The last value in bin_edge should be > N0
"""
pmf = get_mete_pmf(S0, N0, beta)
if bin_edges != None:
N = array(range(1, int(N0) + 1))
binned_pmf = []
for edge in range(0, len(bin_edges) - 1):
bin_probability = sum(pmf[(N >= bin_edges[edge]) &
(N < bin_edges[edge + 1])])
binned_pmf.append(bin_probability)
pmf = array(binned_pmf)
predicted_sad = S0 * pmf
return predicted_sad
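# Illustrative usage sketch for get_mete_sad: expected number of species per
# octave (log2) abundance bin for an assumed community of S0 species and N0
# individuals. S0 and N0 are arbitrary demonstration values; the last bin edge
# must exceed N0, as required by get_mete_sad.
def _example_get_mete_sad(S0=20, N0=1000):
    octave_edges = [2 ** i for i in range(0, 11)]  # 1, 2, 4, ..., 1024 > N0
    return get_mete_sad(S0, N0, bin_edges=octave_edges)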
def get_lambda_spatialdistrib(A, A0, n0):
"""Solve for lambda_Pi from Harte 2011 equ. 7.50 and 7.51
Arguments:
A = the spatial scale of interest
A0 = the maximum spatial scale under consideration
n0 = the number of individuals of the focal species at scale A0
"""
assert type(n0) is int, "n must be an integer"
assert A > 0 and A0 > 0, "A and A0 must be greater than 0"
assert A <= A0, "A must be less than or equal to A0"
y = lambda x: x / (1 - x) - (n0 + 1) * x ** (n0 + 1) / (1 - x ** (n0 + 1)) - n0 * A / A0
if A < A0 / 2:
# Set the distance from the undefined boundaries of the Lagrangian multipliers
# to set the upper and lower boundaries for the numerical root finders
BOUNDS = [0, 1]
DIST_FROM_BOUND = 10 ** -15
exp_neg_lambda = bisect(y, BOUNDS[0] + DIST_FROM_BOUND,
BOUNDS[1] - DIST_FROM_BOUND)
elif A == A0 / 2:
#Special case from Harte (2011). See text between Eq. 7.50 and 7.51
exp_neg_lambda = 1
else:
# x can potentially go up to infinity
# thus use solution of a logistic equation as the starting point
exp_neg_lambda = (fsolve(y, - log(A0 / A - 1)))[0]
lambda_spatialdistrib = -1 * log(exp_neg_lambda)
return lambda_spatialdistrib
def get_spatialdistrib_dict(A, A0, n0, lambda_list=[0, {}]):
"""Solve for lambda_Pi from Harte 2011 equ. 7.50 and 7.51
Arguments:
A = the spatial scale of interest
A0 = the maximum spatial scale under consideration
n0 = the number of individuals of the focal species at scale A0
"""
if (A, A0, n0) not in lambda_list[1]:
lambda_list[1][(A, A0, n0)] = get_lambda_spatialdistrib(A, A0, n0)
lambda_list[0] = lambda_list[1][(A, A0, n0)]
return lambda_list
def get_mete_Pi(n, A, n0, A0):
"""
Solve for the METE Pi distribution from Harte 2011 equ 7.48 and 7.49.
Arguments:
n = number of individuals of the focal species at the scale A;
A = the spatial scale of interest;
n0 = the number of individuals of the focal species at scale A0;
A0 = the maximum spatial scale under consideration;
Returns:
The probability of observing n out of n0 individuals in a randomly selected quadrat
of area A out of A0
"""
x = exp(-get_lambda_spatialdistrib(A, A0, n0))
Z_Pi = sum([x ** i for i in range(0, n0 + 1)])
mete_Pi = x ** n / Z_Pi
return mete_Pi
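# Illustrative usage sketch for get_mete_Pi: the probability that a species
# with n0 individuals in a plot of area A0 places exactly n of them in a
# quadrat of area A. The n0, A, and A0 values are arbitrary demonstration
# choices; the returned probabilities over n = 0..n0 sum to 1.
def _example_get_mete_Pi(n0=10, A=4.0, A0=16.0):
    return [get_mete_Pi(n, A, n0, A0) for n in range(0, n0 + 1)]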
def calc_S_from_Pi_fixed_abu(A, A0, n0vals):
    """
    Downscales the expected number of species using the non-iterative approach of
    Harte 2011 when the abundances are fixed, equ 3.12
    Arguments:
    A = the spatial scale of interest;
    A0 = the maximum spatial scale under consideration;
    n0vals = a list of species abundances at the scale A0
    Returns:
    The expected number of species inferred from the anchor scale
    """
    # A species with abundance n0 at scale A0 is present in a quadrat of area A
    # with probability 1 - Pi(0 | A, n0, A0); summing over species gives the
    # expected richness (the same per-species sum used in
    # sar_noniterative_fixed_abu below)
    S = sum([1 - get_mete_Pi(0, A, n0, A0) for n0 in n0vals])
    return S
def sar_noniterative(Avals, A0, S0, N0, version='approx'):
""" Computes the downscaled METE noninterative species-area relationship (SAR)
Harte 2011 equ C.1\
Arguments:
Avals: the spatial scales of interest, must be greater than zero and less than A0;
A0 = the maximum spatial scale under consideration;
S0 = the total number of species in A0;
N0 = the total number of individuals in A0;
version = 'approx' or 'precise', which specifies if an approximation is used for the pdf
of the SAD or if the precise truncated log-series form is used instead.
Returns:
A numpy array the first row contains the Avals, and the second row contains the expected
S values
"""
A_ok = [i > 0 and i < A0 for i in Avals]
if False in A_ok:
print "Warning: will only compute S for Areas that are greater than zero and less than A0"
        Avals = [Avals[i] for i in range(len(Avals)) if A_ok[i]]
beta = get_beta(S0, N0)
if beta < 0 and version == 'approx':
print("ERROR! Cannot compute log of a negative beta value, change version to precise")
## compute relative abundance distribution
if version == 'approx':
rad = [exp(-beta * n) / (n * log(beta ** -1)) for n in range(1, N0 + 1)]
if version == 'precise':
rad = [trunc_logser_pmf(n, exp(-beta), N0) for n in range(1, N0 + 1)]
Svals = [S0 * sum([(1 - get_mete_Pi(0, A, n, A0)) * rad[n - 1] for n in range(1, N0 + 1)]) for A in Avals]
Svals.append(S0)
Avals.append(A0)
out = list()
out.append(Avals)
out.append(Svals)
return out
def sar_noniterative_fixed_abu(Avals, A0, n0vals):
"""Predictions for the downscaled METE SAR using Eq. 3.12 from Harte 2011 when the
abundances (n0) are fixed"""
A_ok = [i > 0 and i < A0 for i in Avals]
if False in A_ok:
print "Warning: will only compute S for Areas that are greater than zero and less than A0"
        Avals = [Avals[i] for i in range(len(Avals)) if A_ok[i]]
Svals = [sum([1 - get_mete_Pi(0, A, n0, A0) for n0 in n0vals]) for A in Avals]
Svals.append(len(n0vals))
Avals.append(A0)
out = list()
out.append(Avals)
out.append(Svals)
return out
def get_mete_rad(S, N, beta=None, beta_dict={}):
"""Use beta to generate SAD predicted by the METE
Keyword arguments:
S -- the number of species
N -- the total number of individuals
beta -- allows input of beta by user if it has already been calculated
"""
assert S > 1, "S must be greater than 1"
assert N > 0, "N must be greater than 0"
assert S/N < 1, "N must be greater than S"
if beta is None:
beta = get_beta(S, N, beta_dict=beta_dict)
p = e ** -beta
abundance = list(empty([S]))
rank = range(1, int(S)+1)
rank.reverse()
if p >= 1:
for i in range(0, int(S)):
y = lambda x: trunc_logser_cdf(x, p, N) - (rank[i]-0.5) / S
if y(1) > 0:
abundance[i] = 1
else:
abundance[i] = int(round(bisect(y,1,N)))
else:
for i in range(0, int(S)):
y = lambda x: logser.cdf(x,p) / logser.cdf(N,p) - (rank[i]-0.5) / S
abundance[i] = int(round(bisect(y, 0, N)))
return (abundance, p)
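# Illustrative usage sketch for get_mete_rad: the predicted rank-abundance
# distribution (most to least abundant) for an assumed community of S species
# and N individuals, along with the fitted log-series parameter p. S and N are
# arbitrary demonstration values.
def _example_get_mete_rad(S=20, N=1000):
    abundances, p = get_mete_rad(S, N)
    return abundances, p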
def get_mete_sad_geom(S, N):
"""METE's predicted RAD when the only constraint is N/S
Keyword arguments:
S -- the number of species
N -- the total number of individuals
"""
assert S > 1, "S must be greater than 1"
assert N > 0, "N must be greater than 0"
assert S/N < 1, "N must be greater than S"
p = S / N
abundance = list(empty([S]))
rank = range(1, int(S)+1)
rank.reverse()
for i in range(0, int(S)):
y = lambda x: geom.cdf(x,p) / geom.cdf(N,p) - (rank[i]-0.5) / S
abundance[i] = int(round(bisect(y, 0, N)))
return (abundance, p)
def downscale_sar(A, S, N, Amin):
"""Predictions for downscaled SAR using Eq. 7 from Harte et al. 2009"""
beta = get_beta(S, N)
x = exp(-beta)
S = S / x - N * (1 - x) / (x - x ** (N + 1)) * (1 - x ** N / (N + 1))
A /= 2
N /= 2
if A <= Amin:
return ([A], [S])
else:
down_scaled_data = downscale_sar(A, S, N, Amin)
return (down_scaled_data[0] + [A], down_scaled_data[1] + [S])
def downscale_sar_fixed_abu(A0, n0vals, Amin):
"""Predictions for downscaled SAR when abundance is fixed using the iterative approach
by combining Eq. 7.51 and Eq. 3.12 from Harte 2011"""
flag = 0
A = A0
Avals = []
while flag == 0:
A /= 2
if (A >= Amin):
Avals.append(A)
else:
flag = 1
Avals.reverse()
Svals = [sum([1 - heap_prob(0, A, n0, A0) for n0 in n0vals]) for A in Avals]
S0 = len(n0vals)
Svals.append(S0)
Avals.append(A0)
out = []
out.append(Avals)
out.append(Svals)
return out
def upscale_sar(A, S, N, Amax):
"""Predictions for upscaled SAR using Eqs. 8 and 9 from Harte et al. 2009"""
def equations_for_S_2A(x, S_A, N_A):
"""Implicit equations for S(2A) given S(A) and N(A)"""
# TO DO: make this clearer by separating equations and then putting them
# in a list for output
out = [x[1] / x[0] - 2 * N_A *
(1 - x[0]) / (x[0] - x[0] ** (2 * N_A + 1)) *
(1 - x[0] ** (2 * N_A) / (2 * N_A + 1)) - S_A]
n = array(range(1, int(2 * N_A + 1)))
out.append(x[1] / 2 / N_A * sum(x[0] ** n) - sum(x[0] ** n / n))
return out
def solve_for_S_2A(S, N):
beta_A = get_beta(S, N)
# The slope at (N, S) along universal curve
# From Eqn.11 in Harte et al. 2009
# Initial guess of S_2A is calculated using extrapolation with z_A
if beta_A < 0: # to avoid taking the log of a negative number
z_A = 0
else:
z_A = 1 / log(2) / log(1 / beta_A)
if z_A < 0 or z_A > 1: # to avoid initial guesses that are obviously incorrect
S_2A_init = S * 1.5
else:
S_2A_init = S * 2 ** z_A
x_A = exp(-get_beta(S_2A_init, 2 * N))
x0 = fsolve(equations_for_S_2A, [x_A, S_2A_init], args=(S, N), full_output = 1)
S_2A, convergence = x0[0][1], x0[2]
if convergence != 1:
return float('nan')
else:
return S_2A
S = solve_for_S_2A(S, N)
A *= 2
N *= 2
if A >= Amax:
return ([A], [S])
elif isnan(S):
return ([A], S)
else:
up_scaled_data = upscale_sar(A, S, N, Amax)
return ([A] + up_scaled_data[0], [S] + up_scaled_data[1])
def sar(A0, S0, N0, Amin, Amax):
"""Harte et al. 2009 predictions for the species area relationship
Takes a minimum and a maximum area along with the area, richness, and
abundance at some anchor scale and determines the richness at all bisected
and/or doubled scales so as to include Amin and Amax.
"""
# This is where we will deal with adding the anchor scale to the results
def predicted_slope(S, N):
"""Calculates slope of the predicted line for a given S and N
by combining upscaling one level and downscaling one level from
the focal scale
"""
ans_lower = downscale_sar(2, S, N, 1)
if isnan(ans_lower[1][0]) == False:
S_lower = array(ans_lower[1][0])
ans_upper = upscale_sar(2, S, N, 4)
if isnan(ans_upper[1][0]) == True:
print("Error in upscaling. z cannot be computed.")
return float('nan')
else:
S_upper = array(ans_upper[1][0])
return (log(S_upper / S_lower) / log(4))
else:
print("Error in downscaling. Cannot find root.")
return float('nan')
def get_slopes(site_data):
"""get slopes from various scales, output list of area slope and N/S
input data is a list of lists, each list contains [area, mean S, mean N]
"""
# return a list containing 4 values: area, observed slope, predicted slope, and N/S
# ToDo: figure out why values of S as low as 1 still present problems for
# this approach, this appears to happen when S << N
data = array(site_data)
Zvalues = []
area = data[:, 0]
S_values = data[:, 1]
for a in area:
if a * 4 <= max(area): #stop at last area
S_down = float(S_values[area == a])
S_focal = float(S_values[area == a * 2 ])
S_up = float(S_values[area == a * 4])
if S_focal >= 2: #don't calculate if S < 2
N_focal = float(data[area == a * 2, 2])
z_pred = predicted_slope(S_focal, N_focal)
z_emp = (log(S_up) - log(S_down)) / log(4)
NS = N_focal / S_focal
parameters = [a * 2, z_emp, z_pred, NS]
Zvalues.append(parameters)
else:
continue
else:
break
return Zvalues
def plot_universal_curve(slopes_data):
"""plots ln(N/S) x slope for empirical data and MaxEnt predictions.
Predictions should look like Harte's universal curve
input data is a list of lists. Each list contains:
[area, empirical slope, predicted slope, N/S]
"""
#TO DO: Add argument for axes
slopes = array(slopes_data)
NS = slopes[:, 3]
z_pred = slopes[:, 2]
z_obs = slopes[:, 1]
#plot Harte's universal curve from predictions with empirical data to analyze fit
plt.semilogx(NS, z_pred, 'bo')
plt.xlabel("ln(N/S)")
plt.ylabel("Slope")
plt.hold(True)
plt.semilogx(NS, z_obs, 'ro')
plt.show()
def heap_prob(n, A, n0, A0, pdict={}):
"""
    Determines the HEAP probability for n given A, n0, and A0
Uses equation 4.15 in Harte 2011
Returns the probability that n individuals are observed in a quadrat of area A
"""
i = int(log(A0 / A,2))
key = (n, n0, i)
if key not in pdict:
if i == 1:
pdict[key] = 1 / (n0 + 1)
else:
A *= 2
pdict[key] = sum([heap_prob(q, A, n0, A0, pdict)/ (q + 1) for q in range(n, n0 + 1)])
return pdict[key]
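# --- Added illustrative sketch (not part of the original module) ------------
# Two properties of heap_prob that follow from Eq. 4.15, assuming true
# division (as expressions such as 1 / (n0 + 1) above require): a single
# bisection gives the uniform law 1/(n0 + 1), and the probabilities sum to one.
def _example_heap_prob(n0=4):
    assert abs(heap_prob(0, 1, n0, 2) - 1.0 / (n0 + 1)) < 1e-9
    total = sum(heap_prob(n, 1, n0, 4) for n in range(n0 + 1))
    assert abs(total - 1.0) < 1e-9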
def bisect_prob(n, A, n0, A0, psi, pdict={}):
"""
    Univariate pdf of the Bisection model
Theorem 2.3 in Conlisk et al. (2007)
psi is an aggregation parameter {0,1}
Note: when psi = 0.5 that the Bisection Model = HEAP Model
"""
total = 0
i = int(log(A0 / A, 2))
key = (n, n0, i, psi)
if key not in pdict:
if(i == 1):
pdict[key] = single_prob(n, n0, psi)
else:
A *= 2
pdict[key] = sum([bisect_prob(q, A, n0, A0, psi, pdict) * single_prob(n, q, psi) for q in range(n, n0 + 1)])
return pdict[key]
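# --- Added illustrative sketch (not part of the original module) ------------
# Numerical check of the note above: with psi = 0.5 the bisection model
# reproduces the HEAP probabilities (the values n0 = 4, A0 = 4 are arbitrary).
def _example_bisect_equals_heap(n0=4):
    for n in range(n0 + 1):
        assert abs(bisect_prob(n, 1, n0, 4, 0.5) - heap_prob(n, 1, n0, 4)) < 1e-9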
def heap_pmf(A, n0, A0):
"""Determines the probability mass function for HEAP
Uses equation 4.15 in Harte 2011
"""
pmf = [heap_prob(n, A, n0, A0) for n in range(0, n0 + 1)]
return pmf
def binomial(n, k):
return factorial(n) / (factorial(k) * factorial(n - k))
def get_big_binomial(n, k, fdict):
"""returns the natural log of the binomial coefficent n choose k"""
if n > 0 and k > 0:
nFact = fdict[n]
kFact = fdict[k]
nkFact = fdict[n - k]
return nFact - kFact - nkFact
else:
return 0
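# --- Added illustrative sketch (not part of the original module) ------------
# The difference nFact - kFact - nkFact above implies that fdict maps
# m -> log(m!); that mapping is an assumption inferred from the code, not
# documented elsewhere in this file.
def _example_get_big_binomial():
    fdict = dict((m, log(factorial(m))) for m in range(6))
    # exponentiating the log coefficient should recover C(5, 2) = 10 exactly
    assert abs(exp(get_big_binomial(5, 2, fdict)) - binomial(5, 2)) < 1e-9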
def get_heap_dict(n, A, n0, A0, plist=[0, {}]):
"""
Determines the HEAP probability for n given A, n0, and A0
Uses equation 4.15 in Harte 2011
    Returns a list whose first element is the probability of n individuals
    being observed in a quadrat of area A, and whose second element is the
    dictionary that was built to compute that probability
"""
i = int(log(A0 / A,2))
if (n,n0,i) not in plist[1]:
if i == 1:
plist[1][(n,n0,i)] = 1 / (n0 + 1)
else:
A = A * 2
plist[1][(n,n0,i)] = sum([get_heap_dict(q, A, n0, A0, plist)[0]/ (q + 1) for q in range(n, n0 + 1)])
plist[0] = plist[1][(n,n0,i)]
return plist
def build_heap_dict(n,n0,i, filename='heap_lookup_table.pck'):
"""Add values to the lookup table for heap"""
heap_dictionary = get_dict(filename)
if (n,n0,i) not in heap_dictionary:
A = 1
A0 = 2 ** i
heap_dictionary.update( get_heap_dict(n,A,n0,A0,[0,heap_dictionary])[1] )
save_dict(heap_dictionary, filename)
print("Dictionary building completed")
def get_lambda_heap(i, n0):
"""
Probability of observing at least one individual of a species with n0 individuals
given a randomly sampled quadrat of area A out of a total area A0.
This function uses the iterative or HEAP scaling model
    Harte 2007, Scaling Biodiversity Chp. Eq. 6.4, pg.106
i: number of bisections
n0: abundance
"""
if i == 0:
lambda_heap = 1
if i != 0:
A0 = 2 ** i
lambda_heap = 1 - heap_prob(0, 1, n0, A0)
return(lambda_heap)
def get_lambda_mete(i, n0):
"""
Probability of observing at least one individual of a species with n0 individuals
given a randomly sampled quadrat of area A out of a total area A0.
This function uses the non-iterative scaling model
i: number of bisections
n0: abundance
"""
if i == 0:
lambda_mete = 1
if i != 0:
A0 = 2 ** i
lambda_mete = 1 - get_mete_Pi(0, 1, n0, A0)
return(lambda_mete)
def get_lambda_bisect(i, n0, psi):
"""
Probability of observing at least one individual of a species with n0 individuals
given a randomly sampled quadrat of area A out of a total area A0.
i: number of bisections
n0: abundance
psi: aggregation parameter {0, 1}
"""
if i == 0:
lambda_bisect = 1
if i != 0:
A0 = 2 ** i
lambda_bisect = 1 - bisect_prob(0, 1, n0, A0, psi)
return(lambda_bisect)
def chi_heap(i, j, n0, chi_dict={}):
"""
    Calculates the commonality function for a given degree of bisection (i) at
    orders of separation (j)
    Harte 2007, Scaling Biodiversity Chp. Eq. 6.10, pg.113
    i: number of bisections
    j: order of separation
"""
if n0 == 1:
out = 0
else:
key = (i, j, n0)
if key not in chi_dict:
if j == 1:
chi_dict[key] = (n0 + 1)**-1 * sum(map(lambda m: get_lambda_heap(i - 1, m) *
get_lambda_heap(i - 1, n0 - m), range(1, n0)))
else:
i -= 1
j -= 1
chi_dict[key] = (n0 + 1)**-1 * sum(map(lambda m: chi_heap(i, j, m, chi_dict), range(2, n0 + 1)))
out = chi_dict[key]
return(out)
def chi_bisect(i, j, n0, psi, chi_dict={}):
"""
    Calculates the commonality function for a given degree of bisection (i) at
    orders of separation (j) with a specific level of aggregation.
    i: number of bisections
    j: order of separation
n0: abundance
psi: aggregation parameter {0, 1}
Note: Function has only been checked at psi = .5, more testing is needed here
"""
if n0 == 1:
out = 0
else:
key = (i, j, n0, psi)
if key not in chi_dict:
if j == 1:
chi_dict[key] = sum(map(lambda m: single_prob(m, n0, psi) *
get_lambda_bisect(i - 1, m, psi) *
get_lambda_bisect(i - 1, n0 - m, psi), range(1, n0)))
else:
i -= 1
j -= 1
chi_dict[key] = sum(map(lambda m: single_prob(m, n0, psi) *
chi_bisect(i, j, m, psi, chi_dict) , range(2, n0 + 1)))
out = chi_dict[key]
return(out)
def sep_orders(i, shape='sqr'):
"""
Arguments:
i: number of bisections or scale of A relative to A0
shape: sqr, rect, or golden to indicate that A0 is a
square, rectangle, or golden rectangle, respectively
Note: golden rectangle has the dimensions L x L(2^.5)
Returns:
    separation orders in which the number of bisections is
shape preserving
"""
if shape == 'golden':
j = range(i, 0, -1)
if shape == 'sqr':
j = range(i, 0, -1)
even_indices = which([index == 0 for index in map(lambda j: j % 2, j)])
j = [j[index] for index in even_indices]
if shape == 'rect':
j = range(i, 0, -1)
odd_indices = which([index == 1 for index in map(lambda j: j % 2, j)])
j = [j[index] for index in odd_indices]
return j
def calc_D(j, L=1):
"""
    Distance calculation given separation orders
    that are shape preserving
    From Ostling et al. (2004) pg. 630
    j: separation order
L: width of rectangle or square of area A0
"""
D = L / 2**(j / 2)
return D
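# --- Added illustrative sketch (not part of the original module) ------------
# Worked example for sep_orders and calc_D, assuming true division and a
# unit-width square A0 bisected i = 4 times: only even separation orders
# preserve the square shape, and each order maps to a distance L / 2**(j / 2).
def _example_separation_distances():
    orders = sep_orders(4, shape='sqr')           # [4, 2]
    distances = [calc_D(j, L=1) for j in orders]  # [0.25, 0.5]
    return orders, distances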
def sor_heap(A, A0, S0, N0, shape='sqr', L=1):
"""
    Computes Sorensen's similarity index using the truncated logseries SAD
    given spatial grain (A) at all possible separation distances
    Scaling Biodiversity Chp. Eq. 6.10, pg.113
    Also see Plotkin and Muller-Landau (2002), Eq. 10 which
    demonstrates the formulation of Sorensen's index for this case in which
    the abundance distribution is specified but the realized abundances are unknown
shape: shape of A0 see function sep_orders()
L: the width of the rectangle or square area A0
"""
beta = get_beta(S0, N0)
i = int(log(A0 / A, 2))
j = sep_orders(i, shape)
L = [L] * len(j)
d = map(calc_D, j, L)
chi = np.empty((N0, len(d)))
lambda_vals = np.empty((N0, len(d)))
for n in range(1, N0 + 1):
## Eq. 7.32 in Harte 2009
prob_n_indiv = exp(-beta * n) / (n * log(beta ** -1))
chi_tmp = map(lambda jval: chi_heap(i, jval, n), j)
lambda_tmp = [get_lambda_heap(i, n)] * len(d)
chi[n - 1, ] = [prob_n_indiv * x for x in chi_tmp]
lambda_vals[n - 1, ] = [prob_n_indiv * x for x in lambda_tmp]
sor = map(lambda col: sum(chi[:, col]) / sum(lambda_vals[:, col]), range(0, len(d)))
i = [i] * len(j)
out = np.array([i, j, d, sor]).transpose()
return out
def sor_heap_fixed_abu(A, n0, A0, shape='sqr', L=1):
"""
    Computes Sorensen's similarity index for a given SAD (n0)
    and spatial grain (A) at all possible separation distances
    Scaling Biodiversity Chp. Eq. 6.10, pg.113
shape: shape of A0 see function sep_orders()
L: the width of the rectangle or square area A0
"""
if isinstance(n0, (int, long)):
n0 = [n0]
n0_unique = list(set(n0))
n0_uni_len = len(n0_unique)
n0_count = [n0.count(x) for x in n0_unique]
i = int(log(A0 / A, 2))
j = sep_orders(i, shape)
L = [L] * len(j)
d = map(calc_D, j, L)
chi = np.empty((n0_uni_len, len(d)))
lambda_vals = np.empty((n0_uni_len, len(d)))
for sp in range(0, n0_uni_len):
chi_tmp = map(lambda jval: chi_heap(i, jval, n0_unique[sp]), j)
lambda_tmp = [get_lambda_heap(i, n0_unique[sp])] * len(d)
chi[sp, ] = [n0_count[sp] * x for x in chi_tmp]
lambda_vals[sp, ] = [n0_count[sp] * x for x in lambda_tmp]
sor = map(lambda col: sum(chi[:, col]) / sum(lambda_vals[:, col]), range(0, len(d)))
i = [i] * len(j)
out = np.array([i, j, d, sor]).transpose()
return out
def sor_bisect(A, A0, S0, N0, psi, shape='sqr', L=1):
"""
    Computes Sorensen's similarity index using the truncated logseries SAD
    given spatial grain (A) at all possible separation distances
    Scaling Biodiversity Chp. Eq. 6.10, pg.113
    Also see Plotkin and Muller-Landau (2002), Eq. 10 which
    demonstrates the formulation of Sorensen's index for this case in which
    the abundance distribution is specified but the realized abundances are unknown
psi: aggregation parameter {0, 1}
shape: shape of A0 see function sep_orders()
L: the width of the rectangle or square area A0
"""
beta = get_beta(S0, N0)
i = int(log(A0 / A, 2))
j = sep_orders(i, shape)
L = [L] * len(j)
d = map(calc_D, j, L)
chi = np.empty((N0, len(d)))
lambda_vals = np.empty((N0, len(d)))
for n in range(1, N0 + 1):
## Eq. 7.32 in Harte 2009
prob_n_indiv = exp(-beta * n) / (n * log(beta ** -1))
chi_tmp = map(lambda jval: chi_bisect(i, jval, n, psi), j)
lambda_tmp = [get_lambda_bisect(i, n, psi)] * len(d)
chi[n - 1, ] = [prob_n_indiv * x for x in chi_tmp]
lambda_vals[n - 1, ] = [prob_n_indiv * x for x in lambda_tmp]
sor = map(lambda col: sum(chi[:, col]) / sum(lambda_vals[:, col]), range(0, len(d)))
i = [i] * len(j)
psi = [psi] * len(j)
out = np.array([psi, i, j, d, sor]).transpose()
return out
def sor_bisect_fixed_abu(A, n0, A0, psi, shape='sqr', L=1):
"""
    Computes Sorensen's similarity index for a given SAD (n0)
    and spatial grain (A) at all possible separation distances
    Scaling Biodiversity Chp. Eq. 6.10, pg.113
psi: aggregation parameter {0, 1}
shape: shape of A0 see function sep_orders()
L: the width of the rectangle or square area A0
"""
if isinstance(n0, (int, long)):
n0 = [n0]
n0_unique = list(set(n0))
n0_uni_len = len(n0_unique)
n0_count = [n0.count(x) for x in n0_unique]
i = int(log(A0 / A, 2))
j = sep_orders(i, shape)
L = [L] * len(j)
d = map(calc_D, j, L)
chi = np.empty((n0_uni_len, len(d)))
lambda_vals = np.empty((n0_uni_len, len(d)))
for sp in range(0, n0_uni_len):
chi_tmp = map(lambda jval: chi_bisect(i, jval, n0_unique[sp], psi), j)
lambda_tmp = [get_lambda_bisect(i, n0_unique[sp], psi)] * len(d)
chi[sp, ] = [n0_count[sp] * x for x in chi_tmp]
lambda_vals[sp, ] = [n0_count[sp] * x for x in lambda_tmp]
sor = map(lambda col: sum(chi[:, col]) / sum(lambda_vals[:, col]), range(0, len(d)))
i = [i] * len(j)
psi = [psi] * len(j)
out = np.array([psi, i, j, d, sor]).transpose()
return out
def sim_spatial_one_step(abu_list):
"""Simulates the abundances of species after bisecting one cell.
Input: species abundances in the original cell.
Output: a list with two sublists containing species abundances in the two
halved cells.
Assuming indistinguishable individuals (see Harte et al. 2008).
"""
abu_half_1 = []
abu_half_2 = []
for spp in abu_list:
if spp == 0:
abu_half_1.append(0)
abu_half_2.append(0)
else:
abu_1 = random_integers(0, spp)
abu_half_1.append(abu_1)
abu_half_2.append(spp - abu_1)
abu_halves = [abu_half_1, abu_half_2]
return abu_halves
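# --- Added illustrative sketch (not part of the original module) ------------
# The bisection conserves abundance species by species: the two halves always
# sum back to the input list (the abundances below are arbitrary).
def _example_sim_spatial_one_step():
    abu = [4, 0, 7]
    half_1, half_2 = sim_spatial_one_step(abu)
    assert [a + b for a, b in zip(half_1, half_2)] == abu
    return half_1, half_2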
def sim_spatial_whole(S, N, bisec, transect=False, abu=None, beta=None):
"""Simulates species abundances in all cells given S & N at whole plot
level and bisection number.
Keyword arguments:
S -- the number of species
N -- the number of individuals
bisec -- the number of bisections to carry out (see Note below)
transect -- boolean, if True a 1-dimensional spatial community is
generated, the default is to generate a spatially 2-dimensional
community
abu -- an optional abundance vector that can be supplied for the community
instead of using log-series random variates
Output: a list of lists, each sublist contains species abundance in one
cell, and x-y coordinate of the cell.
Note: bisection number 1 corresponds to first bisection which intersects the x-axis
"""
if S == 1:
abu = [N]
if abu is None:
if beta is None:
p = exp(-get_beta(S, N))
else:
p = exp(-beta)
abu = trunc_logser_rvs(p, N, size=S)
abu_prev = [[1, 1, array(abu)]]
bisec_num = 0
while bisec_num < bisec:
abu_new = []
for cell in abu_prev:
x_prev = cell[0]
y_prev = cell[1]
abu_new_cell = sim_spatial_one_step(cell[2])
if transect:
cell_new_1 = [x_prev * 2 - 1, y_prev, abu_new_cell[0]]
cell_new_2 = [x_prev * 2, y_prev, abu_new_cell[1]]
else:
if bisec_num % 2 == 0:
cell_new_1 = [x_prev * 2 - 1, y_prev, abu_new_cell[0]]
cell_new_2 = [x_prev * 2, y_prev, abu_new_cell[1]]
else:
cell_new_1 = [x_prev, y_prev * 2 - 1, abu_new_cell[0]]
cell_new_2 = [x_prev, y_prev * 2, abu_new_cell[1]]
abu_new.append(cell_new_1)
abu_new.append(cell_new_2)
abu_prev = abu_new
bisec_num += 1
return abu_prev
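# --- Added illustrative sketch (not part of the original module) ------------
# With a fixed abundance vector, two bisections yield 2**2 = 4 cells with x-y
# coordinates, and total abundance is conserved (S = 3, N = 6 are arbitrary).
def _example_sim_spatial_whole():
    cells = sim_spatial_whole(3, 6, 2, abu=[3, 2, 1])
    assert len(cells) == 4
    assert sum(cell[2].sum() for cell in cells) == 6
    return cells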
def sim_spatial_whole_iter(S, N, bisec, coords, n_iter = 10000):
"""Simulates the bisection n_iter times and gets the aggregated species
richness in plots with given coordinates."""
max_x = 2 ** ceil((bisec - 1) / 2)
max_y = 2 ** floor((bisec - 1) / 2)
if max(array(coords)[:,0]) > max_x or max(array(coords)[:,1]) > max_y:
print("Error: Coordinates out of bounds.")
return float('nan')
else:
i = 1
S_list = []
while i <= n_iter:
abu_list = []
abu_plot = sim_spatial_whole(S, N, bisec)
for i_coords in coords:
for j_cell in abu_plot:
if j_cell[0] == i_coords[0] and j_cell[1] == i_coords[1]:
abu_list.append(j_cell[2])
break
abu_agg = array(abu_list).sum(axis = 0)
S_i = sum(abu_agg != 0)
S_list.append(S_i)
i += 1
S_avg = sum(S_list) / len(S_list)
return S_avg
def community_energy_pdf(epsilon, S0, N0, E0):
lambda1 = get_lambda1()
lambda2 = get_lambda2()
gamma = lambda1 + epsilon * lambda2
exp_neg_gamma = exp(-gamma)
return S0 / N0 * (exp_neg_gamma / (1 - exp_neg_gamma) ** 2 -
exp_neg_gamma ** N0 / (1 - exp_neg_gamma) *
(N0 + exp_neg_gamma / (1 - exp_neg_gamma)))
def which(boolean_list):
""" Mimics the R function 'which()' and it returns the indics of the
boolean list that are labeled True """
return [i for i in range(0, len(boolean_list)) if boolean_list[i]]
def order(num_list):
"""
This function mimics the R function 'order()' and it carries out a
bubble sort on a list and an associated index list.
The function only returns the index list so that the order of the sorting
is returned but not the list in sorted order
Note: [x[i] for i in order(x)] is the same as sorted(x)
"""
num_list = list(num_list)
list_length = len(num_list)
index_list = range(0, list_length)
swapped = True
while swapped:
swapped = False
for i in range(1, list_length):
if num_list[i-1] > num_list[i]:
temp1 = num_list[i-1]
temp2 = num_list[i]
num_list[i-1] = temp2
num_list[i] = temp1
temp1 = index_list[i-1]
temp2 = index_list[i]
index_list[i-1] = temp2
index_list[i] = temp1
swapped = True
return index_list
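# --- Added illustrative sketch (not part of the original module) ------------
# Small examples of the two R-style helpers defined above.
def _example_which_and_order():
    assert which([True, False, True]) == [0, 2]
    x = [3, 1, 2]
    assert order(x) == [1, 2, 0]
    assert [x[i] for i in order(x)] == sorted(x)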
def single_rvs(n0, psi, size=1):
"""Generate random deviates from the single division model, still
is not working properly possibily needs to be checked"""
    cdf = single_cdf(n0, psi)
xvals = [0] * size
for i in range(size):
rand_float = uniform(0,1)
temp_cdf = list(cdf + [rand_float])
ordered_values = order(temp_cdf)
xvals[i] = [j for j in range(0,len(ordered_values)) if ordered_values[j] == (n0 + 1)][0]
return xvals
def get_F(a, n):
"""
Eq. 7 in Conlisk et al. (2007)
gamma(a + n) / (gamma(a) * gamma(n + 1))
"""
return mp.gamma(a + n) / (mp.gamma(a) * mp.gamma(n + 1))
def single_prob(n, n0, psi, c=2):
"""
    Eq. 1.3 in Conlisk et al. (2007), note that this implementation is
only correct when the variable c = 2
Note: if psi = .5 this is the special HEAP case in which the
function no longer depends on n.
c = number of cells
"""
a = (1 - psi) / psi
F = (get_F(a, n) * get_F((c - 1) * a, n0 - n)) / get_F(c * a, n0)
return float(F)
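# --- Added illustrative sketch (not part of the original module) ------------
# Numerical check of the note above: at psi = 0.5 the single-division pdf
# collapses to the HEAP value 1/(n0 + 1), independent of n (n0 is arbitrary).
def _example_single_prob_heap_case(n0=5):
    for n in range(n0 + 1):
        assert abs(single_prob(n, n0, 0.5) - 1.0 / (n0 + 1)) < 1e-9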
def single_cdf(n0, psi):
cdf = [0.0] * (n0 + 1)
for n in range(0, n0 + 1):
if n == 0:
cdf[n] = single_prob(n, n0, psi)
else:
cdf[n] = cdf[n - 1] + single_prob(n, n0, psi)
return cdf
| mit |
lalten/rpg_dvs_ros | dvs_calibration/scripts/pick.py | 1 | 7816 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from dvs_msgs.msg import ImageObjectPoints
import Tkinter
import tkMessageBox
import ImageTk
import Image as Imagepy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
class Picker(object):
"""
    Pick messages out of a topic which should be forwarded.
    The user is asked every time a new message arrives whether to take it,
    so they can verify that the LED pattern was correctly detected
    and prevent wrong calibration results caused by wrong patterns.
"""
def __init__(self):
self.imageObjectPointsPub = rospy.Publisher('out/image_object_points', ImageObjectPoints, queue_size=1)
self.lastImageOjectPoints = False
self.lastImage = False
self.ignorePub = rospy.Publisher('out/ignore', String, queue_size=1)
self.takePub = rospy.Publisher('out/take', String, queue_size=1)
#some default size
self.lastImageSize = (512,512)
self.title = rospy.get_namespace() + ' - ' + rospy.get_name()
self.isStereoSlave = bool(rospy.get_param('~stereoslave',False))
rospy.loginfo("am i stereo? %d",self.isStereoSlave)
self.root = Tkinter.Tk()
self.root.wm_title(self.title)
self.label = Tkinter.Label(self.root, text='last pattern')
self.label.pack()
self.imgtk = None
self.labelWaitStr = Tkinter.StringVar()
self.labelWait = Tkinter.Label(self.root, textvariable=self.labelWaitStr)
self.labelWait.pack()
if self.isStereoSlave is not True:
self.addButton = Tkinter.Button(self.root, text='Add Pattern', command=self.takeData)
self.addButton.pack()
self.ignoreButton = Tkinter.Button(self.root, text='Ignore Pattern', command=self.ignoreData)
self.ignoreButton.pack()
self.clearImage()
self.stopWaitingForDecision()
def startListener(self):
self.points_sub = rospy.Subscriber("in/image_object_points", \
ImageObjectPoints, self.pointsCallback)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("in/image_pattern",Image,self.imageCallback)
self.ignoreSub = rospy.Subscriber("in/ignore",String,self.ignoreCallback)
self.takeSub = rospy.Subscriber("in/take",String,self.takeCallback)
def ignoreCallback(self,msg):
"""
in stereo setup, reset callback to sync both windows
"""
rospy.loginfo("got reset callback on topic")
#reset everything
self.ignoreData()
def takeCallback(self,msg):
rospy.loginfo("got take callback on topic")
#reset everything
self.takeData()
def imageCallback(self,data):
rospy.loginfo(rospy.get_caller_id() + "I got an image")
if self.waitingForDecision is True:
rospy.loginfo(rospy.get_caller_id() + "ignore image (waiting for decision)")
return
self.lastImage = data
def showImage(self):
if self.lastImage is not False:
try:
image_bgr8 = self.bridge.imgmsg_to_cv2(self.lastImage, "bgr8")
self.lastImageSize = image_bgr8.shape[:2]
except CvBridgeError as e:
print(e)
cv2image = cv2.cvtColor(image_bgr8, cv2.COLOR_BGR2RGBA)
im = Imagepy.fromarray(cv2image)
#im = Imagepy.fromstring('RGB', cv2.GetSize(self.lastImage), self.lastImage.tostring())
# Convert the Image object into a TkPhoto object
self.imgtk = ImageTk.PhotoImage(image=im)
# Put it in the display window
#Tkinter.Label(root, image=imgtk).pack()
self.label.config(image=self.imgtk)
rospy.sleep(0.05)
def clearImage(self):
#create new image of size x
im = Imagepy.new('RGB', self.lastImageSize, 'black')
# Convert the Image object into a TkPhoto object
self.imgtk = ImageTk.PhotoImage(image=im)
#show image
self.label.config(image=self.imgtk)
rospy.sleep(0.05)
def pointsCallback(self,data):
rospy.loginfo(rospy.get_caller_id() + "got points callback")
if self.waitingForDecision is True:
rospy.loginfo(rospy.get_caller_id() + "ignore points (waiting for decision)")
return
if self.lastImage is False:
rospy.loginfo(rospy.get_caller_id() + "did not yet receive an image")
return
#check if time difference to image is too large
#image should always come before points
me = data.header.stamp
diff = me - self.lastImage.header.stamp
#usually about 0.001537061
maxDiff = rospy.Duration.from_sec(0.01)
if diff > maxDiff:
print("time difference was to much, do not take it", diff.to_sec())
self.ignoreData()
return
        #we got an image that is not too long ago,
        #so perfect, let's go
        #and hope they belong together
self.nowWaitForDecision()
#store data
self.lastImageOjectPoints = data
#self.scatterplot(data.image_points)
self.showImage()
def nowWaitForDecision(self):
self.waitingForDecision = True
self.labelWaitStr.set('waiting for decision')
if self.isStereoSlave is not True:
self.addButton.config(state="normal")
self.ignoreButton.config(state="normal")
def stopWaitingForDecision(self):
self.waitingForDecision = False
if self.isStereoSlave is not True:
self.addButton.config(state="disabled")
self.ignoreButton.config(state="disabled")
        #reset to false, so wait until we get object points
#followed by an image
#then wait for decision
self.lastImageOjectPoints = False
self.labelWaitStr.set('waiting for pattern')
def ignoreData(self):
rospy.loginfo("ignore data")
msg = String('take')
self.ignorePub.publish(msg)
self.clearImage()
self.stopWaitingForDecision()
def takeData(self):
if self.waitingForDecision is False:
rospy.loginfo("do not take data, not complete")
return
rospy.loginfo("take data and publish it")
self.imageObjectPointsPub.publish(self.lastImageOjectPoints)
msg = String('take')
self.takePub.publish(msg)
self.clearImage()
self.stopWaitingForDecision()
def scatterplot(self,points):
import matplotlib.pyplot as plt
import random
import numpy as np
#in images ---x-->
# and y downwards
# |
# \/
x = []
y = []
for i in points:
x.append(i.x)
y.append(128-i.y)
plt.axis((0,128,0,128))
#get random color between 0 and 1
#and use it for all points
#TODO: not working yet
colors = (100 * random.random()) * np.ones(len(x))
plt.title(self.title)
plt.scatter(x, y, c=colors, alpha=0.5)
#make interactive plot, so it is nonblocking
plt.ion()
        #show data without clearing image
#so we add up the examples
plt.show()
def run(self):
#start main loop of tkinter
self.root.mainloop()
def main():
rospy.init_node('picker', anonymous=True)
p = Picker()
p.startListener()
try:
# spin() simply keeps python from exiting until this node is stopped
p.run()
except KeyboardInterrupt:
print("shutting down")
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
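# --- Added illustrative note (not part of the original script) --------------
# Topic graph implied by the subscribers/publishers above (names are relative
# and would normally be remapped in a launch file):
#   in/image_object_points  <- detected image/object point pairs
#   in/image_pattern        <- rendered pattern image for visual inspection
#   out/image_object_points -> forwarded only after the user clicks "Add Pattern"
#   out/take, out/ignore    -> synchronisation messages for a stereo slave picker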
| gpl-3.0 |
Leguark/GeMpy | GeMpy/Visualization.py | 1 | 4737 | """
Module with classes and methods to visualize structural geology data and potential fields of the regional modelling based on
the potential field method.
Tested on Ubuntu 14
Created on 23/09/2016
@author: Miguel de la Varga
"""
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
# TODO: inherit pygeomod classes
#import sys, os
class PlotData(object):
"""Object Definition to perform Bayes Analysis"""
def __init__(self, _data, block=None, **kwargs):
"""
        :param _data: input data object (interfaces, foliations, series and grid information)
        :param block: block model to plot (optional)
        :param kwargs: potential_field
"""
self._data = _data
if block:
self._block = block
if 'potential_field' in kwargs:
self._potential_field_p = kwargs['potential_field']
# TODO planning the whole visualization scheme. Only data, potential field and block. 2D 3D? Improving the iteration
# with pandas framework
self._set_style()
def _set_style(self):
plt.style.use(['seaborn-white', 'seaborn-paper'])
matplotlib.rc("font", family="Times New Roman")
def plot_data(self, direction="y", serie="all", **kwargs):
"""
Plot the projection of all data
        :param direction: cartesian direction ("x", "y" or "z") of the projection plane
        :param serie: name of the series to plot, or "all"
        :return:
"""
x, y, Gx, Gy = self._slice(direction)[4:]
if serie == "all":
series_to_plot_i = self._data.Interfaces[self._data.Interfaces["series"].
isin(self._data.series.columns.values)]
series_to_plot_f = self._data.Foliations[self._data.Foliations["series"].
isin(self._data.series.columns.values)]
else:
series_to_plot_i = self._data.Interfaces[self._data.Interfaces["series"] == serie]
series_to_plot_f = self._data.Foliations[self._data.Foliations["series"] == serie]
sns.lmplot(x, y,
data=series_to_plot_i,
fit_reg=False,
hue="formation",
scatter_kws={"marker": "D",
"s": 100},
legend=True,
legend_out=True,
**kwargs)
# Plotting orientations
plt.quiver(series_to_plot_f[x], series_to_plot_f[y],
series_to_plot_f[Gx], series_to_plot_f[Gy],
pivot="tail")
plt.xlabel(x)
plt.ylabel(y)
def _slice(self, direction, cell_number=25):
_a, _b, _c = slice(0, self._data.nx), slice(0, self._data.ny), slice(0, self._data.nz)
if direction == "x":
_a = cell_number
x = "Y"
y = "Z"
Gx = "G_y"
Gy = "G_z"
extent_val = self._data.ymin, self._data.ymax, self._data.zmin, self._data.zmax
elif direction == "y":
_b = cell_number
x = "X"
y = "Z"
Gx = "G_x"
Gy = "G_z"
extent_val = self._data.xmin, self._data.xmax, self._data.zmin, self._data.zmax
elif direction == "z":
_c = cell_number
x = "X"
y = "Y"
Gx = "G_x"
Gy = "G_y"
extent_val = self._data.xmin, self._data.xmax, self._data.ymin, self._data.ymax
else:
            raise AttributeError(str(direction) + " must be a cartesian direction, i.e. xyz")
return _a, _b, _c, extent_val, x, y, Gx, Gy
def plot_block_section(self, cell_number=13, direction="y", **kwargs):
plot_block = self._block.get_value().reshape(self._data.nx, self._data.ny, self._data.nz)
_a, _b, _c, extent_val, x, y = self._slice(direction, cell_number)[:-2]
plt.imshow(plot_block[_a, _b, _c].T, origin="bottom", cmap="viridis",
extent=extent_val,
interpolation="none", **kwargs)
plt.xlabel(x)
plt.ylabel(y)
def plot_potential_field(self, cell_number, potential_field=None, n_pf=0,
direction="y", plot_data=True, serie="all", **kwargs):
if not potential_field:
potential_field = self._potential_field_p[n_pf]
if plot_data:
self.plot_data(direction, self._data.series.columns.values[n_pf])
_a, _b, _c, extent_val, x, y = self._slice(direction, cell_number)[:-2]
plt.contour(potential_field[_a, _b, _c].T,
extent=extent_val,
**kwargs)
if 'colorbar' in kwargs:
plt.colorbar()
plt.title(self._data.series.columns[n_pf])
plt.xlabel(x)
plt.ylabel(y)
def export_vtk(self):
"""
export vtk
:return:
""" | mit |
tomlof/scikit-learn | sklearn/cluster/tests/test_k_means.py | 26 | 32656 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should not longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that this values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers which in turns make the clustering
# dependent of the numerical unstabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not been collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# to many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = X.astype(dtype)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input
# data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# ensure the extracted row is a 2d array
assert_equal(estimator.predict(X_test[:1]),
estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after
# partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
# compare arrays with low precision since the difference between
# 32 and 64 bit sometimes makes a difference up to the 4th decimal
# place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_k_means_init_centers():
# This test is used to check KMeans won't mutate the user provided input
# array silently even if input data and init centers have the same type
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
def test_sparse_k_means_init_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X).cluster_centers_
)
# The same should be true when X is sparse
X_sparse = sp.csr_matrix(X)
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X_sparse).cluster_centers_
)
def test_sparse_validate_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=4).fit(X).cluster_centers_
# Test that a ValueError is raised for validate_center_shape
classifier = KMeans(n_clusters=3, init=centers, n_init=1)
    msg = r"The shape of the initial centers \(\(4L?, 4L?\)\) " \
          r"does not match the number of clusters 3"
assert_raises_regex(ValueError, msg, classifier.fit, X)
| bsd-3-clause |
openbermuda/karmapi | karmapi/widgets.py | 1 | 3345 | """
Widgets for pig
"""
from datetime import datetime
import PIL
from collections import deque
from karmapi import pigfarm, base
import sys
import curio
import pandas
np = pandas.np
from matplotlib import ticker
from numpy import random
import math
PI = math.pi
class Circle(pigfarm.MagicCarpet):
def compute_data(self):
r = 50
self.x = range(-50, 51)
self.y = [(((r * r) - (x * x)) ** 0.5) for x in self.x]
def plot(self):
self.axes.hold(True)
self.axes.plot(self.x, self.y)
self.axes.plot(self.x, [-1 * y for y in self.y])
class Friday(pigfarm.MagicCarpet):
def compute_data(self):
#self.data = random.randint(0, 100, size=100)
self.data = list(range(100))
def plot(self):
self.axes.plot(self.data)
class MapPoints(pigfarm.MagicCarpet):
def compute_data(self):
self.df = base.load(self.path)
def plot(self):
""" See Maps.plot_points_on_map """
        self.df.plot(ax=self.axes)  # draw the loaded points on this carpet's axes
class InfinitySlalom(pigfarm.MagicCarpet):
def compute_data(self):
#self.data = random.randint(0, 100, size=100)
self.waves_start = random.randint(5, 10)
self.waves_end = random.randint(32, 128)
nwaves = random.randint(self.waves_start, self.waves_end)
self.x = np.linspace(
0,
nwaves,
512) * PI
self.y = np.sin(self.x / PI) * (64 * PI)
def plot(self):
#selector = pig.win_curio_fix()
#curio.run(self.updater(), selector=selector)
pass
async def get_source(self):
return await self.farm.micks.get()
async def start(self):
# FIXME: let a data source such as a mick drive the animation
#self.mick = await self.get_source()
pass
async def run(self):
""" Run the animation
Loop forever updating the figure
A little help sleeping from curio
"""
self.fig.clear()
self.axes = self.fig.add_subplot(111)
while True:
#data = await mick.get()
#print('infinite data:', len(data))
await curio.sleep(self.sleep)
if random.random() < 0.25:
print('clearing axes', flush=True)
self.axes.clear()
self.compute_data()
colour = random.random()
n = len(self.x)
background = np.ones((n, n))
background *= colour
background[0, 0] = 0.0
background[n-1, n-1] = 1.0
for curve in range(random.randint(3, 12)):
self.axes.fill(self.x, self.y * 1 * random.random(),
alpha=0.3)
self.axes.fill(self.x, self.y * -1 * random.random(),
alpha=0.3)
self.axes.imshow(background, alpha=0.1, extent=(
0, 66 * PI, -100, 100))
self.draw()
await curio.sleep(1)
def get_widget(path):
parts = path.split('.')
if len(parts) == 1:
pig_mod = sys.modules[__name__]
return base.get_item(path, pig_mod)
return base.get_item(path)
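# Illustrative usage sketch (not part of the original module): get_widget resolves a
# dotted path either inside this module or through karmapi.base.get_item. The call
# forms below are assumptions based on the code above, not a documented API.
#
#   circle_cls = get_widget('Circle')                  # class defined in this module
#   circle_cls = get_widget('karmapi.widgets.Circle')  # fully qualified path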
| gpl-3.0 |
kostyfisik/fdtd-1d | fdtd-step1a-simple-ABC.py | 1 | 2937 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2015 Konstantin Ladutenko <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# 1D FDTD with a simple absorbing boundary condition (ABC) for the E field
# Based on Understanding the Finite-Difference Time-Domain Method, John
# B. Schneider, www.eecs.wsu.edu/~schneidj/ufdtd, 2010.
import numpy as np
import math as m
import matplotlib.pyplot as plt
from time import sleep
imp0=377.0 # Free space impedance
size=1800 # Domain size
#Dielectric distribution
epsilon = 1
eps= np.ones(size)
eps[:] = epsilon
# setting ABC constants _AFTER_ epsilon (we need the speed of light in the medium)
# Taflove, eq. 6.35
c = 1/np.sqrt(epsilon)
a = (c-1)/(c+1)
b = 2/(c + 1)
# Left boundary
wl_nm1,wl_n,wl_np1 = 0,0,0 # Field at x=0 at time steps n-1, n, n+1
wlp1_nm1,wlp1_n,wlp1_np1 = 0,0,0 # Field at x=1 at time steps n-1, n, n+1
# Right boundary
wr_nm1,wr_n,wr_np1 = 0,0,0 # Field at x=size at time steps n-1, n, n+1
wrm1_nm1,wrm1_n,wrm1_np1 = 0,0,0 # Field at x=size-1 at time steps n-1, n, n+1
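# Illustrative sketch (not used by this "simple ABC" script): the constants a, b and the
# stored boundary fields above are what the first-order ABC of Taflove eq. 6.35 would
# combine. Shown here only as a reference; the time loop below keeps the simpler
# ez[0] = ez[1] / hy[-1] = hy[-2] boundary update.
def mur_abc_left(wl_nm1, wl_n, wlp1_nm1, wlp1_n, wlp1_np1):
    # E[0]^{n+1} = -E[1]^{n-1} + a*(E[1]^{n+1} + E[0]^{n-1}) + b*(E[0]^{n} + E[1]^{n})
    return -wlp1_nm1 + a*(wlp1_np1 + wl_nm1) + b*(wl_n + wlp1_n)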
#Source
source_width = 30.0*np.sqrt(epsilon)
#source_width = size*np.sqrt(epsilon)
delay = 10*source_width
source_x = int(1.0*size/2.0) #Source position
def source(current_time, delay, source_width):
return m.exp(-(current_time-delay)**2/(2.0 * source_width**2))
#Model
total_steps = int((size+delay)*np.sqrt(epsilon)) # Time stepping
frame_interval = int(total_steps/30.0)
all_steps = np.linspace(0, size-1, size)
#Inital field E_z and H_y is equal to zero
ez = np.zeros(size)
hy = np.zeros(size)
x = np.arange(0,size-1,1)
#print(x)
for time in range(total_steps):
######################
#Magnetic field
######################
hy[-1] = hy[-2]
hy[x] = hy[x] + (ez[x+1] - ez[x])/imp0
######################
#Electric field
######################
ez[0] = ez[1]
ez[x+1] = ez[x+1] + (hy[x+1]-hy[x])*imp0/eps[x+1]
ez[source_x] += source(time, delay, source_width)
######################
# Output
######################
if time % frame_interval == 0:
plt.clf()
plt.title("Ez after t=%i"%time)
plt.plot(all_steps, ez, all_steps, hy*imp0)
plt.savefig("step1-at-time-%i.png"%time,pad_inches=0.02, bbox_inches='tight')
plt.draw()
# plt.show()
plt.clf()
plt.close()
| gpl-3.0 |
IowaRoboticTelescope/Grism-plotter | grism/Jacoby_spectra/plot-jacoby-spectra.py | 1 | 1056 | #!/usr/bin/env python
'''
Reads standard stellar spectra and plots them
Ref: Jacoby G. et al. 1984, A Library of Stellar Spectra, Astrophys. J. Suppl., 56, 257 (1984)
URL: http://cdsarc.u-strasbg.fr/cgi-bin/Cat?III/92
RLM 26 Mar 2015
'''
import matplotlib.pyplot as plt
import numpy as np
fn = open('fluxes.dat')
lines = fn.readlines()
names = []
for line in lines:
name = line[0:9].replace(' ','')
names.append(name)
fluxes = line[10:].split()
unique_names = list(set(names))
# Build dictionary structure using stars as keys
data = {}
for name in unique_names:
flux = []
for line in lines:
if name == line[0:9].replace(' ',''):
fluxline = [float(line[i:i+10])*1.e13 for i in range(10,len(line)-1,10)]
flux += fluxline
data[name] = flux
star = 'HD116608'
plt.figure(1,figsize=(12,8))
y = np.array(data[star])              # spectrum for the selected star
lambda1 = 351.0                       # starting wavelength (nm)
x = lambda1 + 0.14*np.arange(len(y))  # 0.14 nm per sample
plt.plot(x,y,'r-')
plt.xlabel('Wavelength (nm)')
plt.ylabel(r'Flux (erg cm$^{-2}$ s$^{-1}$ Angstrom$^{-1}$ $\times\ 10^{13}$)')
plt.title('%s' % star)
plt.grid(True)
plt.show()
| gpl-3.0 |
srowen/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 18 | 20955 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return float(v + 1)
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use a scalar pandas UDF in
# groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)).sort('plus_one(id)')
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)).sort('plus_one(id)')
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
# groupby one expression and one python UDF
result6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum_udf(df.v)).sort(['(v % 2)', 'plus_one(id)']))
expected6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum(df.v)).sort(['(v % 2)', 'plus_one(id)']))
# groupby one expression and one scalar pandas UDF
result7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v)).sort(['sum(v)', 'plus_two(id)']))
expected7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum(df.v)).sort(['sum(v)', 'plus_two(id)']))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEqual(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
        expected = [Row(id=1, sum=5), Row(id=2, sum=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register('max_udf', max_udf)
with self.tempView("table"):
df.createTempView('table')
agg1 = df.agg(max_udf(df['id']))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf('float', PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([
Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)
])
agg = df.groupBy('id').agg(mean('foo').alias("mean"))
filtered = agg.filter(agg['mean'] > 40.0)
assert(filtered.collect()[0]["mean"] == 42.0)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
jason-neal/companion_simulations | Notebooks/Extraction_with_same_snr.py | 1 | 9688 |
# coding: utf-8
# # Extracting the simulations at same noise level.
#
# What is the distribution of returned parameters when repeating the same simulation (fixed snr also) so only the random noise changes.
#
# This is not very interesting atm as I need more samples which are running on exo currently.
# Some have a distribution while others are at either ends of the parameter grid limits I set.
#
# In[6]:
import re
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import glob
import sqlalchemy as sa
from mingle.utilities.db_utils import load_sql_table
get_ipython().run_line_magic('matplotlib', 'inline')
# In[7]:
import yaml
# Get the range of parameters used in simulation from config file.
def get_sim_grid(path="/home/jneal/Phd/Analysis/sims_variable_params_same_snr/"):
config_file = os.path.join(path, "config.yaml")
with open(config_file) as f:
config = yaml.safe_load(f)
return config["sim_grid"]
sim_grid = get_sim_grid()
print(sim_grid)
# In[15]:
class SnrDistribution(object):
def __init__(self, df, snr, teff):
#ddf.to_numeric(s, errors='coerce')
self.df = df
self.snr = snr
self.teff = teff
self.size = len(self.df)
def mean(self, column, nan=True):
if nan:
return np.nanmean(self.df[column])
else:
return np.mean(self.df[column])
def std(self, column, nan=True):
if nan:
return np.nanstd(self.df[column])
else:
return np.std(self.df[column])
def values(self, column, nan=True):
return self.df[column].values
# In[12]:
class SimReader(object):
def __init__(self, base="/home/jneal/Desktop/Inbox/a/bhm_noise_sim/analysis",
namebase="BSBHMNOISE",
prefix="", mode="bhm", chi2_val="coadd_chi2"):
self.base = base
self.namebase = namebase.upper()
self.prefix = prefix.upper()
if mode in ["iam", "tcm", "bhm"]:
self.mode = mode
else:
raise ValueError("Invalid SimReader mode")
if chi2_val in ["chi2_1", "chi2_2", "chi2_3", "chi2_4", "coadd_chi2"]:
self.chi2_val = chi2_val
else:
raise ValueError("Invalid chi2_val.")
def load_results(self, teff, snr):
starname = "{}{}{}N{}".format(self.prefix, self.namebase, teff, snr)
directory = os.path.join(self.base, starname, self.mode)
df_store = pd.DataFrame()
# print(directory)
dbs = glob.glob(os.path.join(directory, "*_coadd_{}_chisqr_results.db".format(self.mode)))
for dbname in dbs:
# print(dbname)
            match = re.search(r"N\d{1,4}-(\d{1,5})_coadd_" + "{}".format(self.mode), dbname, flags=0)
obsnum = match.group(1)
try:
table = load_sql_table(dbname, verbose=False, echo=False)
dbdf = pd.read_sql(sa.select(table.c).order_by(table.c[self.chi2_val].asc()).limit(1), table.metadata.bind)
dbdf["snr"] = snr # Add SNR column
dbdf["obsnum"] = obsnum # Add Obsnum column
df_store = dbdf.append(df_store)
except Exception as e:
print(e)
print(f"Didn't get Database for teff={teff}-snr={snr}-obsnum={obsnum}")
# Coerce to be numeric columns
c = df_store.columns[df_store.dtypes.eq(object)]
df_store[c] = df_store[c].apply(pd.to_numeric, errors='coerce', axis=0)
return SnrDistribution(df_store, snr=snr, teff=teff)
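# Illustrative usage sketch (not a cell from the original notebook): load_results expects
# databases laid out as <base>/<PREFIX><NAMEBASE><teff>N<snr>/<mode>/*_coadd_<mode>_chisqr_results.db.
# The teff/snr values below are assumptions, not results from this analysis.
#
#   reader = SimReader(mode="iam")
#   dist = reader.load_results(teff=3400, snr=100)
#   dist.size, dist.mean("teff_2"), dist.std("teff_2")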
# In[17]:
import matplotlib.lines as lines
import matplotlib.colors as colors
lines_array = list(lines.lineStyles.keys())
colors_array = list(colors.cnames.keys())
def analysis_param(df_store, param, index, individual=False, values=False):
if individual:
plt.figure()
for i, t in enumerate(df_store):
# print(i, t, len(t))
means = [df.mean(param) for df in t]
std = [df.std(param) for df in t]
label = "{}".format(index[i])
if label == "":
label = "None"
plt.errorbar(noises, means, std, label = label, ls=lines_array[i])
        if values:
            vals = [df.values(param) for df in t]
            for nn, noise in enumerate(noises):
                plt.plot(noise*np.ones_like(vals[nn]), vals[nn], "o", color=colors_array[i])
if individual:
plt.xlabel("SNR")
plt.ylabel(param)
plt.legend()
plt.title("Distribution of individual IAM simulated results. {}". format(param))
plt.show()
plt.xlabel("SNR")
plt.ylabel(param)
plt.legend()
plt.title("Distribution of IAM simulated results.")
plt.show()
# In[28]:
noises = [0, 50, 100, 500]
teffs = [3400] #, 3400]
IAMSimReader = SimReader(base="/home/jneal/Desktop/Inbox/a/sims_var_params_same_snr/analysis",
namebase="NEWNOISESCRIPT",
prefix="",
mode="iam")
sim_grid = get_sim_grid(IAMSimReader.base+"/..")
print(sim_grid)
df_store = [[0]*len(noises) for n in range(len(teffs))]
print(df_store)
for i, teff in enumerate(teffs):
print("teff = {}".format(teff))
for j, noise in enumerate(noises):
df = IAMSimReader.load_results(teff, noise)
df_store[i][j] = df
print("Done")
# In[25]:
df_store[0]
# In[26]:
analysis_param(df_store, "teff_1", teffs, individual=False)
analysis_param(df_store, "gamma", teffs, individual=False)
analysis_param(df_store, "rv", teffs, individual=False)
analysis_param(df_store, "teff_2", teffs, individual=False)
analysis_param(df_store, "logg_1", teffs, individual=False)
analysis_param(df_store, "logg_2", teffs, individual=False)
analysis_param(df_store, "feh_1", teffs, individual=False)
analysis_param(df_store, "feh_2", teffs, individual=True)
# In[27]:
analysis_param(df_store, "gamma", teffs, individual=True)
analysis_param(df_store, "teff_1", teffs, individual=True)
analysis_param(df_store, "logg_1", teffs, individual=True)
analysis_param(df_store, "feh_1", teffs, individual=True)
analysis_param(df_store, "gamma", teffs, individual=True)
analysis_param(df_store, "teff_1", teffs, individual=True)
analysis_param(df_store, "logg_1", teffs, individual=True)
analysis_param(df_store, "feh_1", teffs, individual=True)
# In[ ]:
# Older code
# In[ ]:
chi2_val = "coadd_chi2"
import re
def load_fixed_snr(teff, snr):
base = "/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/"
starname = "NOISESCRIPT{}N{}".format(teff, snr)
directory = os.path.join(base, starname, "iam")
df_store = pd.DataFrame()
dbs = glob.glob(os.path.join(directory, "*_coadd_iam_chisqr_results.db"))
print(len(dbs))
for dbname in dbs:
        match = re.search(r"N\d{1,4}-(\d{1,5})_coadd_iam", dbname, flags=0)
obsnum = match.group(1)
try:
table = load_sql_table(dbname, verbose=False, echo=False)
dbdf = pd.read_sql(sa.select(table.c).order_by(table.c[chi2_val].asc()).limit(1), table.metadata.bind)
dbdf["snr"] = snr # Add SNR column
dbdf["obsnum"] = obsnum # Add Obsnum column
df_store = dbdf.append(df_store)
except Exception as e:
print(e)
print(f"Didn't get Database for teff={teff}-snr={snr}-obsnum={obsnum}")
# print("Results")
# print("Host Temperature = 5200 K, Companion Temperature = {}".format(teff))
# df_store["median_alpha"] = df_store.apply(lambda row: np.median([row.alpha_1, row.alpha_2, row.alpha_3, row.alpha_4]), axis=1)
# print(df_store[["snr", "obsnum", "coadd_chi2", "teff_1", "teff_2", "median_alpha"]])
return df_store
# In[ ]:
noises = [0, 20, 50, 100, 1000]
teffs = [2500] #, 3400]
#df_store = [[0]* len(teffs) for n in range(len(noises))]
#print(df_store)
#for i, noise in enumerate(noises):
# print("NOISE = {}".format(noise))
# for j, teff in enumerate(teffs):
# df = load_fixed_snr(teff, noise)
# df_store[i][j] = df
# In[ ]:
def analyse_fixed_snr(df, teff, snr):
sim_grid = get_sim_grid()
teff_1_limits = [t + 5200 for t in sim_grid["teff_1"][0:2]]
teff_2_limits = [t + teff for t in sim_grid["teff_2"][0:2]]
df.plot(x="obsnum", y="teff_1", kind="hist", style="*")
starname = "NOISESCRIPT{}N{}".format(teff, snr)
plt.title(starname)
df.plot(x="obsnum", y="teff_2", kind="hist")
starname = "NOISESCRIPT{}N{}".format(teff, snr)
plt.title(starname)
df.plot(x="obsnum", y="coadd_chi2", kind="hist")
starname = "$\chi^2$ NOISESCRIPT{}N{}".format(teff, snr)
plt.title(starname)
df.plot( y="gamma", kind="hist")
df.plot(x="teff_1", y="gamma", kind="scatter", xlim=teff_1_limits, ylim=sim_grid["gammas"][0:2])
df.plot(x="teff_2", y="gamma", kind="scatter", xlim=teff_2_limits, ylim=sim_grid["gammas"][0:2])
df.plot(x="teff_1", y="rv", kind="scatter", xlim=teff_1_limits, ylim=sim_grid["rvs"][0:2])
df.plot(x="teff_2", y="rv", kind="scatter", xlim=teff_2_limits, ylim=sim_grid["rvs"][0:2])
df.plot(x="teff_1", y="teff_2", kind="scatter", xlim=teff_1_limits, ylim=teff_2_limits)
plt.show()
print("Standard deviations of values\n", df.std(axis=0)[["teff_1", "teff_2", "coadd_chi2", "gamma", "rv"]])
# In[ ]:
for i, noise in enumerate(noises):
print("NOISE = {}".format(noise))
for j, teff in enumerate(teffs):
analyse_fixed_snr(df_store[i][j], teff, noise)
| mit |
RangerKD/rpg_svo | svo_analysis/src/svo_analysis/tum_benchmark_tools/evaluate_ate.py | 22 | 8437 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model,data):
"""Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
"""
numpy.set_printoptions(precision=3,suppress=True)
model_zerocentered = model - model.mean(1)
data_zerocentered = data - data.mean(1)
W = numpy.zeros( (3,3) )
for column in range(model.shape[1]):
W += numpy.outer(model_zerocentered[:,column],data_zerocentered[:,column])
U,d,Vh = numpy.linalg.linalg.svd(W.transpose())
S = numpy.matrix(numpy.identity( 3 ))
if(numpy.linalg.det(U) * numpy.linalg.det(Vh)<0):
S[2,2] = -1
rot = U*S*Vh
trans = data.mean(1) - rot * model.mean(1)
model_aligned = rot * model + trans
alignment_error = model_aligned - data
trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
return rot,trans,trans_error
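# Illustrative self-check (not part of the original TUM script): aligning a synthetic
# trajectory against a translated copy of itself should recover the offset with a
# negligible per-point residual. The helper name _align_self_check is made up here.
def _align_self_check():
    rng = numpy.random.RandomState(0)
    model = numpy.matrix(rng.rand(3, 50))              # synthetic 3xn trajectory
    true_trans = numpy.matrix([[1.0], [-2.0], [0.5]])  # known offset
    data = model + true_trans
    rot, trans, trans_error = align(model, data)
    assert numpy.max(trans_error) < 1e-9               # residual is numerically zero
    return rot, trans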
def plot_traj(ax,stamps,traj,style,color,label):
"""
Plot a trajectory using matplotlib.
Input:
ax -- the plot
stamps -- time stamps (1xn)
traj -- trajectory (3xn)
style -- line style
color -- line color
label -- plot legend
"""
stamps.sort()
interval = numpy.median([s-t for s,t in zip(stamps[1:],stamps[:-1])])
x = []
y = []
last = stamps[0]
for i in range(len(stamps)):
if stamps[i]-last < 2*interval:
x.append(traj[i][0])
y.append(traj[i][1])
elif len(x)>0:
ax.plot(x,y,style,color=color,label=label)
label=""
x=[]
y=[]
last= stamps[i]
if len(x)>0:
ax.plot(x,y,style,color=color,label=label)
if __name__=="__main__":
# parse command line
parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
args = parser.parse_args()
first_list = associate.read_file_list(args.first_file)
second_list = associate.read_file_list(args.second_file)
matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
if len(matches)<2:
sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
rot,trans,trans_error = align(second_xyz,first_xyz)
second_xyz_aligned = rot * second_xyz + trans
first_stamps = first_list.keys()
first_stamps.sort()
first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
second_stamps = second_list.keys()
second_stamps.sort()
second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
second_xyz_full_aligned = rot * second_xyz_full + trans
if args.verbose:
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
print "absolute_translational_error.median %f m"%numpy.median(trans_error)
print "absolute_translational_error.std %f m"%numpy.std(trans_error)
print "absolute_translational_error.min %f m"%numpy.min(trans_error)
print "absolute_translational_error.max %f m"%numpy.max(trans_error)
else:
print "%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
if args.save_associations:
file = open(args.save_associations,"w")
file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
file.close()
if args.save:
file = open(args.save,"w")
file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
file.close()
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.patches import Ellipse
fig = plt.figure()
ax = fig.add_subplot(111)
plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
label="difference"
for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
label=""
ax.legend()
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
plt.savefig(args.plot,dpi=90)
| gpl-3.0 |
moonbury/pythonanywhere | MasteringPandas/2060_11_Code/run_svm_titanic.py | 3 | 3341 | #!/home/femibyte/local/anaconda/bin/python
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics,svm
from patsy import dmatrix, dmatrices
import re
train_df = pd.read_csv('csv/train.csv', header=0)
test_df = pd.read_csv('csv/test.csv', header=0)
formula1 = 'C(Pclass) + C(Sex) + Fare'
formula2 = 'C(Pclass) + C(Sex)'
formula3 = 'C(Sex)'
formula4 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch'
formula5 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch + C(Embarked)'
formula6 = 'C(Pclass) + C(Sex) + C(Embarked)'
formula7 = 'C(Pclass) + C(Sex) + Age + Parch + C(Embarked)'
formula8 = 'C(Pclass) + C(Sex) + SibSp + Parch + C(Embarked)'
formula_map = {'PClass_Sex_Fare' : formula1,
'PClass_Sex' : formula2,
'Sex' : formula3,
'PClass_Sex_Age_Sibsp_Parch' : formula4,
'PClass_Sex_Age_Sibsp_Parch_Embarked' : formula5
}
#formula_map={'PClass_Sex_Embarked' : formula6}
formula_map = {'PClass_Sex_SibSp_Parch_Embarked' : formula8}
kernel_types=['linear','rbf','poly']
kernel_types=['poly']
#kernel_types=['rbf']
def main():
train_df_filled=fill_null_vals(train_df,'Fare')
train_df_filled=fill_null_vals(train_df_filled,'Age')
assert len(train_df_filled)==len(train_df)
test_df_filled=fill_null_vals(test_df,'Fare')
test_df_filled=fill_null_vals(test_df_filled,'Age')
assert len(test_df_filled)==len(test_df)
for formula_name, formula in formula_map.iteritems():
print "name=%s formula=%s" % (formula_name,formula)
y_train,X_train = dmatrices('Survived ~ ' + formula,
train_df_filled,return_type='dataframe')
print "Running SVM with formula : %s" % formula
print "X_train cols=%s " % X_train.columns
y_train = np.ravel(y_train)
for kernel in kernel_types:
#model = svm.SVC(kernel=kernel,gamma=3)
model = svm.SVC(kernel=kernel)
print "About to fit..."
svm_model = model.fit(X_train, y_train)
print "Kernel: %s" % kernel
print "Training score:%s" % svm_model.score(X_train,y_train)
X_test=dmatrix(formula,test_df_filled)
predicted=svm_model.predict(X_test)
print "predicted:%s\n" % predicted[:5]
assert len(predicted)==len(test_df)
pred_results=pd.Series(predicted,name='Survived')
svm_results=pd.concat([test_df['PassengerId'],pred_results],axis=1)
svm_results.Survived=svm_results.Survived.astype(int)
results_file='csv/svm_%s_%s.csv' % (kernel,formula_name)
#results_file = re.sub('[+ ()C]','',results_file)
svm_results.to_csv(results_file,index=False)
def fill_null_vals(df,col_name):
null_passengers=df[df[col_name].isnull()]
passenger_id_list=null_passengers['PassengerId'].tolist()
df_filled=df.copy()
for pass_id in passenger_id_list:
idx=df[df['PassengerId']==pass_id].index[0]
similar_passengers=df[(df['Sex']==null_passengers['Sex'][idx]) & (df['Pclass']==null_passengers['Pclass'][idx])]
mean_val=np.mean(similar_passengers[col_name].dropna())
df_filled.loc[idx,col_name]=mean_val
return df_filled
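# Illustrative note (not part of the original script): fill_null_vals imputes a missing
# value with the mean over passengers sharing the same Sex and Pclass, e.g.
#   train_filled = fill_null_vals(train_df, 'Age')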
if __name__ == '__main__':
main()
| gpl-3.0 |
cbertinato/pandas | pandas/tests/groupby/test_categorical.py | 1 | 39137 | from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import PY37
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_equal, assert_frame_equal, assert_series_equal)
def cartesian_product_for_groupers(result, args, names):
""" Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper """
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(np.arange(len(categories)),
categories=categories,
ordered=a.ordered)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index).sort_index()
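# Illustrative sketch (not part of the upstream test module): for groupers with 2 and 3
# categories the reindexed frame always has 2 * 3 rows, with unobserved category
# combinations present as NaN. The helper name and data below are made up for this example.
def _cartesian_product_example():
    cat1 = Categorical(["a", "a"], categories=["a", "b"])
    cat2 = Categorical(["x", "y"], categories=["x", "y", "z"])
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2]})
    observed = df.groupby(["A", "B"], observed=True).sum()
    full = cartesian_product_for_groupers(observed, [cat1, cat2], ["A", "B"])
    assert len(full) == 2 * 3  # every category combination appears exactly once
    return full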
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {'min': group.min(),
'max': group.max(),
'count': group.count(),
'mean': group.mean()}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == 'C'
def test_basic():
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = Categorical(x.person_name)
g = x.groupby(['person_id'], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
df['a'])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(sum),
df[['a']])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)),
df[['a']])
# Filter
tm.assert_series_equal(
df.a.groupby(c, observed=False).filter(np.all),
df['a'])
tm.assert_frame_equal(
df.groupby(c, observed=False).filter(np.all),
df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
df['a'])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(sum),
df[['a']])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
df[['a']])
# GH 9603
df = DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(
c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
# more basic
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories,
ordered=True)
expected = expected.reindex(exp_idx)
assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(ord_labels, ordered=True,
categories=['foo', 'bar', 'baz', 'qux'])
expected = ord_data.groupby(
exp_cats, sort=False, observed=False).describe()
assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8),
levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(0)), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"]))
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(data=np.arange(2, 12, 2),
index=MultiIndex(levels=[CategoricalIndex(
["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"]))
result = g.get_group('a')
assert_frame_equal(result, expected)
@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636", strict=False)
@pytest.mark.parametrize('ordered', [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list('abc'), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(
list('aaa'), categories=['a', 'b'], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({'missing': missing,
'dense': dense,
'values': values})
grouped = df.groupby(['missing', 'dense'], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays(
[missing, dense], names=['missing', 'dense'])
expected = DataFrame([0, 1, 2.],
index=idx,
columns=['values'])
result = grouped.apply(lambda x: np.mean(x))
assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype('int')
result = grouped.mean()
assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense],
names=['missing', 'dense'])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df['C'] = ['foo', 'bar'] * 2
# multiple groupers with a non-cat
gb = df.groupby(['A', 'B', 'C'], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ['foo', 'bar'] * 2],
names=['A', 'B', 'C'])
expected = DataFrame({'values': Series(
[1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected,
[cat1, cat2, ['foo', 'bar']],
list('ABC'))
tm.assert_frame_equal(result, expected)
gb = df.groupby(['A', 'B'], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame({'values': [1, 2, 3, 4]},
index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected,
[cat1, cat2],
list('AB'))
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {'cat':
Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
'ints': [1, 1, 2, 2],
'val': [10, 20, 30, 40]}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(list('ab'), name="cat",
categories=list('abc'),
ordered=True)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20., 30]},
index=exp_index)
if not observed:
index = CategoricalIndex(list('abc'), name="cat",
categories=list('abc'),
ordered=True)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg('mean')
expected = DataFrame(
{"val": [10, 30, 20, 40],
"cat": Categorical(['a', 'a', 'b', 'b'],
categories=['a', 'b', 'c'],
ordered=True),
"ints": [1, 2, 1, 2]}).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected,
[df.cat.values, [1, 2]],
['cat', 'ints'])
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {'foo': [10, 8, 4, 8, 4, 1, 1], 'bar': [10, 20, 30, 40, 50, 60, 70],
'baz': ['d', 'c', 'e', 'a', 'a', 'd', 'c']}
df = DataFrame(d)
cat = pd.cut(df['foo'], np.linspace(0, 10, 3))
df['range'] = cat
groups = df.groupby(['range', 'baz'], as_index=False, observed=observed)
result = groups.agg('mean')
groups2 = df.groupby(['range', 'baz'], as_index=True, observed=observed)
expected = groups2.agg('mean').reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, 'C2'], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]],
names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5],
"C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected,
[values.values, [1, 2, 3, 4]],
['cat', 'C2'])
result = groups_double_key.agg('mean')
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame({
'cat': np.random.randint(0, 255, size=30000),
'int_id': np.random.randint(0, 255, size=30000),
'other_id': np.random.randint(0, 10000, size=30000),
'foo': 0})
df['cat'] = df.cat.astype(str).astype('category')
grouped = df.groupby(['cat', 'int_id', 'other_id'], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c'])
df = DataFrame({'cat': cat, 'vals': [1, 2, 3]})
g = df.groupby('cat', observed=observed)
result = g.groups
if observed:
expected = {'a': Index([0, 2], dtype='int64'),
'c': Index([1], dtype='int64')}
else:
expected = {'a': Index([0, 2], dtype='int64'),
'b': Index([], dtype='int64'),
'c': Index([1], dtype='int64')}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame({'cat': Categorical(['a', np.nan, 'a'],
categories=['a', 'b', 'd']),
'vals': [1, 2, 3]})
g = df.groupby('cat', observed=observed)
result = g.groups
if observed:
expected = {'a': Index([0, 2], dtype='int64')}
else:
expected = {'a': Index([0, 2], dtype='int64'),
'b': Index([], dtype='int64'),
'd': Index([], dtype='int64')}
tm.assert_dict_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, 'a', np.nan, 'a'],
categories=['a', 'b', 'c'])
s2 = Series([1, 2, 3, 4])
df = DataFrame({'s1': s1, 's2': s2})
result = df.groupby('s1', observed=observed).first().reset_index()
if observed:
expected = DataFrame({'s1': Categorical(['a'],
categories=['a', 'b', 'c']), 's2': [2]})
else:
expected = DataFrame({'s1': Categorical(['a', 'b', 'c'],
categories=['a', 'b', 'c']),
's2': [2, np.nan, np.nan]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(['d', 'a', 'b', 'a', 'd', 'b'],
categories=['a', 'b', 'missing', 'd'],
ordered=ordered)
val = Series(['d', 'a', 'b', 'a', 'd', 'b'])
df = DataFrame({'label': label, 'val': val})
# aggregate on the Categorical
result = (df.groupby('label', observed=observed, sort=sort)['val']
.aggregate('first'))
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype='object')
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = 'missing'
if not all(label == aggr):
msg = ('Labels and aggregation results not consistently sorted\n' +
'for (ordered={}, observed={}, sort={})\n' +
'Result:\n{}').format(ordered, observed, sort, result)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index,
categories=expected.index,
ordered=True)
assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0),
expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(0)), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ['foo', 'bar', 'baz', 'qux']
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(
np.repeat(
np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
df['cats'] = cats
# with a cat index
result = df.set_index('cats').groupby(level=0, observed=False).sum()
expected = df[list('abcd')].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby('cats', observed=False).sum()
expected = df[list('abcd')].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
categories=['foo', 'bar', 'baz', 'qux'],
ordered=True)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame({'a': range(10),
'medium': ['A', 'B'] * 5,
'artist': list('XYXXY') * 2})
df['medium'] = df['medium'].astype('category')
gcat = df.groupby(
['artist', 'medium'], observed=False)['a'].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(['A', 'B'], ordered=False,
name='medium')
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat['A'] + gcat['B']
expected = Series([6, 4], index=Index(['X', 'Y'], name='artist'))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
with pytest.raises(ValueError):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),
'A': [10, 11, 11],
'B': [101, 102, 103]})
result = df.groupby(['cat', 'A'], as_index=False, observed=True).sum()
expected = DataFrame(
{'cat': Categorical([1, 2], categories=df.cat.cat.categories),
'A': [10, 11],
'B': [101, 205]},
columns=['cat', 'A', 'B'])
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, 'A']
result = df.groupby(['cat', f], as_index=False, observed=True).sum()
expected = DataFrame(
{'cat': Categorical([1, 2], categories=df.cat.cat.categories),
'A': [10, 22],
'B': [101, 205]},
columns=['cat', 'A', 'B'])
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(['a', 'b', 'b'], name='cat')
result = df.groupby(['cat', s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ['cat', 'A']
expected = DataFrame(
{'cat': Categorical([1, 2], categories=df.cat.cat.categories),
'A': [10, 11],
'B': [101, 205]},
columns=['cat', 'A', 'B'])
for name in [None, 'X', 'B']:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list('abc')
# ordered=True
df = DataFrame({'A': Categorical(list('ba'),
categories=categories,
ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True)
tm.assert_index_equal(
df.groupby('A', sort=True, observed=False).first().index, index)
tm.assert_index_equal(
df.groupby('A', sort=False, observed=False).first().index, index)
# ordered=False
df = DataFrame({'A': Categorical(list('ba'),
categories=categories,
ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False)
nosort_index = CategoricalIndex(list('bac'), list('bac'),
ordered=False)
tm.assert_index_equal(
df.groupby('A', sort=True, observed=False).first().index,
sort_index)
tm.assert_index_equal(
df.groupby('A', sort=False, observed=False).first().index,
nosort_index)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
# single grouper
exp_full = DataFrame({'A': [2.0, 1.0, np.nan],
'B': [25.0, 20.0, np.nan],
'C1': Categorical(list("bac"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("bac"),
categories=list("bac"),
ordered=True)})
for col in ['C1', 'C2']:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(
by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_sort():
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
res = df.groupby(['value_group'], observed=False)['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'], ordered=True)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range', ordered=True)
expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
col = 'range'
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
df['range'] = Categorical(df['range'], ordered=False)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range')
expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',
'(0, 2.5]'],
categories=['(7.5, 10]', '(2.5, 5]',
'(5, 7.5]', '(0, 2.5]'],
name='range')
expected_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
index=index, columns=['foo', 'bar'])
col = 'range'
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
    # use the same data as test_groupby_sort_categorical, whose categories
    # correspond to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt')
col = 'dt'
assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first())
assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first())
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame({"A": Categorical(['a', 'a', 'b'],
categories=['a', 'b', 'c']),
'B': [1, 2, 1]})
expected_idx = CategoricalIndex(['a', 'b', 'c'], name='A')
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame({"A": Categorical(['a', 'a', 'b'],
categories=['a', 'b', 'c']),
'B': [1, 2, 1]})
expected_idx = CategoricalIndex(['a', 'b', 'c'], name='A')
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame({
'key1': Categorical(list('abcbabcba')),
'key2': Categorical(
list(pd.date_range('2018-06-01 00', freq='1T', periods=3)) * 3),
'values': np.arange(9),
})
result = df.groupby(['key1', 'key2']).mean()
idx = MultiIndex.from_product(
[Categorical(['a', 'b', 'c']),
Categorical(pd.date_range('2018-06-01 00', freq='1T', periods=3))],
names=['key1', 'key2'])
expected = DataFrame(
{'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("as_index, expected", [
(True, Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype='category'),
[1, 2, 2]], names=['a', 'b']
),
data=[1, 2, 3], name='x'
)),
(False, DataFrame({
'a': Series([1, 1, 2], dtype='category'),
'b': [1, 2, 2],
'x': [1, 2, 3]
}))
])
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame({
'a': Series([1, 1, 2], dtype='category'),
'b': [1, 2, 2],
'x': [1, 2, 3]
})
result = df.groupby(
['a', 'b'], as_index=as_index, observed=True)['x'].sum()
assert_equal(result, expected)
@pytest.mark.parametrize('fill_value', [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(['a', 'b', 'c', 'd'],
categories=['a', 'b', 'c', 'd'], ordered=False)
expected = Categorical([None, 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'], ordered=False)
res = ct.shift(1, fill_value=fill_value)
assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat['A'] = df_cat['A'].astype('category')
df_cat['B'] = df_cat['B'].astype('category')
df_cat['C'] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(['D'], axis=1)
return df_cat
@pytest.mark.parametrize('operation, kwargs', [
('agg', dict(dtype='category')),
('apply', dict())])
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame({'A': ['foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'one', 'three']
}, **kwargs))
expected = Series(data=[1, 3, 2, 4], index=index, name='C')
grouped = df_cat.groupby(['A', 'B'], observed=True)['C']
result = getattr(grouped, operation)(sum)
assert_series_equal(result, expected)
@pytest.mark.parametrize('operation', ['agg', 'apply'])
@pytest.mark.parametrize('observed', [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[CategoricalIndex(['bar', 'foo'], ordered=False),
CategoricalIndex(['one', 'three', 'two'], ordered=False)],
names=['A', 'B']).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3],
index=index, name='C')
grouped = df_cat.groupby(['A', 'B'], observed=observed)['C']
result = getattr(grouped, operation)(sum)
assert_series_equal(result, expected)
@pytest.mark.parametrize("observed, index, data", [
(True, MultiIndex.from_tuples(
[('foo', 'one', 'min'), ('foo', 'one', 'max'),
('foo', 'two', 'min'), ('foo', 'two', 'max'),
('bar', 'one', 'min'), ('bar', 'one', 'max'),
('bar', 'three', 'min'), ('bar', 'three', 'max')],
names=['A', 'B', None]), [1, 1, 3, 3, 2, 2, 4, 4]),
(False, MultiIndex.from_product(
[CategoricalIndex(['bar', 'foo'], ordered=False),
CategoricalIndex(['one', 'three', 'two'], ordered=False),
Index(['min', 'max'])],
names=['A', 'B', None]),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3]),
(None, MultiIndex.from_product(
[CategoricalIndex(['bar', 'foo'], ordered=False),
CategoricalIndex(['one', 'three', 'two'], ordered=False),
Index(['min', 'max'])],
names=['A', 'B', None]),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3])])
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name='C')
result = df_cat.groupby(['A', 'B'], observed=observed)['C'].apply(
lambda x: OrderedDict([('min', x.min()), ('max', x.max())]))
assert_series_equal(result, expected)
| bsd-3-clause |
Supermem/tushare | tushare/stock/classifying.py | 16 | 8914 | # -*- coding:utf-8 -*-
"""
Interfaces for retrieving stock classification data
Created on 2015/02/01
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
import json
import re
from pandas.util.testing import _network_error_classes
import time
import tushare.stock.fundamental as fd
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_industry_classified():
"""
    Get industry classification data.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        c_name :industry name
"""
df = _get_type_data(ct.SINA_INDUSTRY_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['ids']))
data = []
ct._write_head()
for row in df.values:
rowDf = _get_detail(row[0])
rowDf['c_name'] = row[1]
data.append(rowDf)
data = pd.concat(data, ignore_index=True)
return data
def get_concept_classified():
"""
    Get concept classification data.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        c_name :concept name
"""
ct._write_head()
df = _get_type_data(ct.SINA_CONCEPTS_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sf'], ct.PAGES['cpt']))
data = []
for row in df.values:
rowDf = _get_detail(row[0])
rowDf['c_name'] = row[1]
data.append(rowDf)
data = pd.concat(data,ignore_index=True)
return data
def get_area_classified():
"""
    Get regional classification data.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        area :region name
"""
df = fd.get_stock_basics()
df = df[['name', 'area']]
df.reset_index(level=0, inplace=True)
df = df.sort('area').reset_index(drop=True)
return df
def get_gem_classified():
"""
    Get ChiNext (growth enterprise board) stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.code.str[0] == '3']
df = df.sort('code').reset_index(drop=True)
return df
def get_sme_classified():
"""
    Get SME (small and medium enterprise) board stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.code.str[0:3] == '002']
df = df.sort('code').reset_index(drop=True)
return df
def get_st_classified():
"""
    Get risk-warning (ST) board stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.name.str.contains('ST')]
df = df.sort('code').reset_index(drop=True)
return df
def _get_detail(tag, retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
try:
ct._write_console()
request = Request(ct.SINA_DATA_DETAIL_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['jv'],
tag))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
except _network_error_classes:
pass
else:
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
jstr = json.dumps(text)
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}), columns=ct.THE_FIELDS)
df = df[ct.FOR_CLASSIFY_B_COLS]
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _get_type_data(url):
try:
request = Request(url)
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str.split('=')[1]
data_json = json.loads(data_str)
df = pd.DataFrame([[row.split(',')[0], row.split(',')[1]] for row in data_json.values()],
columns=['tag', 'name'])
return df
except Exception as er:
print(str(er))
def get_hs300s():
"""
    Get the current CSI 300 (HS300) constituents and their index weights.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        date :date
        weight:weight
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['hs300b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
wt = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['hs300w']), parse_cols=[0,3,6])
wt.columns = ct.FOR_CLASSIFY_W_COLS
wt['code'] = wt['code'].map(lambda x :str(x).zfill(6))
return pd.merge(df,wt)
except Exception as er:
print(str(er))
def get_sz50s():
"""
    Get SSE 50 constituent stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['sz50b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
return df
except Exception as er:
print(str(er))
def get_zz500s():
"""
    Get CSI 500 constituent stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['zz500b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
return df
except Exception as er:
print(str(er))
def get_terminated():
"""
    Get the list of delisted (terminated) stocks.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        oDate:listing date
        tDate:delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.TERMINATED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def get_suspended():
"""
    Get the list of stocks suspended from listing.
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        oDate:listing date
        tDate:delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.SUSPENDED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
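# --- Hypothetical usage sketch (added for illustration; not part of the original
# tushare module). It relies only on the public helpers defined above and on the
# column names documented in their docstrings; the final merge step is simply an
# assumption about how a caller might combine the results.
#
#   >>> import tushare.stock.classifying as cf
#   >>> hs300 = cf.get_hs300s()            # columns: code, name, date, weight
#   >>> areas = cf.get_area_classified()   # columns: code, name, area
#   >>> merged = hs300.merge(areas[['code', 'area']], on='code', how='left')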
| bsd-3-clause |
rhuelga/sms-tools | lectures/03-Fourier-properties/plots-code/anal-synth.py | 24 | 1154 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
from scipy.io.wavfile import read
from scipy.fftpack import fft, ifft
import math
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.hanning(501)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
y = DFT.dftSynth(mX, pX, w.size)*sum(w)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot(4,1,1)
plt.plot(np.arange(-hM1, hM2), x1*w, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
plt.subplot(4,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size,min(mX),max(mX)])
plt.title ('mX')
plt.subplot(4,1,3)
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0,pX.size,min(pX),max(pX)])
plt.title ('pX')
plt.subplot(4,1,4)
plt.plot(np.arange(-hM1, hM2), y, lw=1.5)
plt.axis([-hM1, hM2, min(y), max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('anal-synth.png')
plt.show()
| agpl-3.0 |
datapythonista/pandas | pandas/tests/util/test_assert_frame_equal.py | 3 | 11150 | import pytest
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture(params=[True, False])
def by_blocks_fixture(request):
return request.param
@pytest.fixture(params=["DataFrame", "Series"])
def obj_fixture(request):
return request.param
def _assert_frame_equal_both(a, b, **kwargs):
"""
    Check that two DataFrames are equal.
This check is performed commutatively.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
tm.assert_frame_equal(a, b, **kwargs)
tm.assert_frame_equal(b, a, **kwargs)
def _assert_not_frame_equal(a, b, **kwargs):
"""
    Check that two DataFrames are not equal.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
msg = "The two DataFrames were equal when they shouldn't have been"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(a, b, **kwargs)
def _assert_not_frame_equal_both(a, b, **kwargs):
"""
    Check that two DataFrames are not equal.
This check is performed commutatively.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
_assert_not_frame_equal(a, b, **kwargs)
_assert_not_frame_equal(b, a, **kwargs)
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_row_order_mismatch(check_like, obj_fixture):
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [3, 2, 1], "B": [6, 5, 4]}, index=["c", "b", "a"])
if not check_like: # Do not ignore row-column orderings.
msg = f"{obj_fixture}.index are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
else:
_assert_frame_equal_both(df1, df2, check_like=check_like, obj=obj_fixture)
@pytest.mark.parametrize(
"df1,df2",
[
(DataFrame({"A": [1, 2, 3]}), DataFrame({"A": [1, 2, 3, 4]})),
(DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), DataFrame({"A": [1, 2, 3]})),
],
)
def test_frame_equal_shape_mismatch(df1, df2, obj_fixture):
msg = f"{obj_fixture} are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, obj=obj_fixture)
@pytest.mark.parametrize(
"df1,df2,msg",
[
# Index
(
DataFrame.from_records({"a": [1, 2], "c": ["l1", "l2"]}, index=["a"]),
DataFrame.from_records({"a": [1.0, 2.0], "c": ["l1", "l2"]}, index=["a"]),
"DataFrame\\.index are different",
),
# MultiIndex
(
DataFrame.from_records(
{"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
),
DataFrame.from_records(
{"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
),
"MultiIndex level \\[0\\] are different",
),
],
)
def test_frame_equal_index_dtype_mismatch(df1, df2, msg, check_index_type):
kwargs = {"check_index_type": check_index_type}
if check_index_type:
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, **kwargs)
else:
tm.assert_frame_equal(df1, df2, **kwargs)
def test_empty_dtypes(check_dtype):
columns = ["col1", "col2"]
df1 = DataFrame(columns=columns)
df2 = DataFrame(columns=columns)
kwargs = {"check_dtype": check_dtype}
df1["col1"] = df1["col1"].astype("int64")
if check_dtype:
msg = r"Attributes of DataFrame\..* are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, **kwargs)
else:
tm.assert_frame_equal(df1, df2, **kwargs)
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_index_mismatch(check_like, obj_fixture):
msg = f"""{obj_fixture}\\.index are different
{obj_fixture}\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='object'\\)"""
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "d"])
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_columns_mismatch(check_like, obj_fixture):
msg = f"""{obj_fixture}\\.columns are different
{obj_fixture}\\.columns values are different \\(50\\.0 %\\)
\\[left\\]: Index\\(\\['A', 'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\['A', 'b'\\], dtype='object'\\)"""
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
def test_frame_equal_block_mismatch(by_blocks_fixture, obj_fixture):
obj = obj_fixture
msg = f"""{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) are different
{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) values are different \\(33\\.33333 %\\)
\\[index\\]: \\[0, 1, 2\\]
\\[left\\]: \\[4, 5, 6\\]
\\[right\\]: \\[4, 5, 7\\]"""
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 7]})
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)
@pytest.mark.parametrize(
"df1,df2,msg",
[
(
DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),
DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "e̊"]}),
"""{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) are different
{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) values are different \\(33\\.33333 %\\)
\\[index\\]: \\[0, 1, 2\\]
\\[left\\]: \\[é, è, ë\\]
\\[right\\]: \\[é, è, e̊\\]""",
),
(
DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),
DataFrame({"A": ["a", "a", "a"], "E": ["e", "e", "e"]}),
"""{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) are different
{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) values are different \\(100\\.0 %\\)
\\[index\\]: \\[0, 1, 2\\]
\\[left\\]: \\[á, à, ä\\]
\\[right\\]: \\[a, a, a\\]""",
),
],
)
def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):
# see gh-20503
#
# Test ensures that `tm.assert_frame_equals` raises the right exception
# when comparing DataFrames containing differing unicode objects.
msg = msg.format(obj=obj_fixture)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)
def test_assert_frame_equal_extension_dtype_mismatch():
# https://github.com/pandas-dev/pandas/issues/32747
left = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
right = left.astype(int)
msg = (
"Attributes of DataFrame\\.iloc\\[:, 0\\] "
'\\(column name="a"\\) are different\n\n'
'Attribute "dtype" are different\n'
"\\[left\\]: Int64\n"
"\\[right\\]: int[32|64]"
)
tm.assert_frame_equal(left, right, check_dtype=False)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(left, right, check_dtype=True)
def test_assert_frame_equal_interval_dtype_mismatch():
# https://github.com/pandas-dev/pandas/issues/32747
left = DataFrame({"a": [pd.Interval(0, 1)]}, dtype="interval")
right = left.astype(object)
msg = (
"Attributes of DataFrame\\.iloc\\[:, 0\\] "
'\\(column name="a"\\) are different\n\n'
'Attribute "dtype" are different\n'
"\\[left\\]: interval\\[int64, right\\]\n"
"\\[right\\]: object"
)
tm.assert_frame_equal(left, right, check_dtype=False)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(left, right, check_dtype=True)
@pytest.mark.parametrize("right_dtype", ["Int32", "int64"])
def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype):
# https://github.com/pandas-dev/pandas/issues/35715
left = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
right = DataFrame({"a": [1, 2, 3]}, dtype=right_dtype)
tm.assert_frame_equal(left, right, check_dtype=False)
@pytest.mark.parametrize(
"dtype",
[
("timedelta64[ns]"),
("datetime64[ns, UTC]"),
("Period[D]"),
],
)
def test_assert_frame_equal_datetime_like_dtype_mismatch(dtype):
df1 = DataFrame({"a": []}, dtype=dtype)
df2 = DataFrame({"a": []})
tm.assert_frame_equal(df1, df2, check_dtype=False)
def test_allows_duplicate_labels():
left = DataFrame()
right = DataFrame().set_flags(allows_duplicate_labels=False)
tm.assert_frame_equal(left, left)
tm.assert_frame_equal(right, right)
tm.assert_frame_equal(left, right, check_flags=False)
tm.assert_frame_equal(right, left, check_flags=False)
with pytest.raises(AssertionError, match="<Flags"):
tm.assert_frame_equal(left, right)
with pytest.raises(AssertionError, match="<Flags"):
tm.assert_frame_equal(left, right)
def test_assert_frame_equal_columns_mixed_dtype():
# GH#39168
df = DataFrame([[0, 1, 2]], columns=["foo", "bar", 42], index=[1, "test", 2])
tm.assert_frame_equal(df, df, check_like=True)
def test_frame_equal_extension_dtype(frame_or_series, any_nullable_numeric_dtype):
# GH#39410
obj = frame_or_series([1, 2], dtype=any_nullable_numeric_dtype)
tm.assert_equal(obj, obj, check_exact=True)
@pytest.mark.parametrize("indexer", [(0, 1), (1, 0)])
def test_frame_equal_mixed_dtypes(frame_or_series, any_nullable_numeric_dtype, indexer):
dtypes = (any_nullable_numeric_dtype, "int64")
obj1 = frame_or_series([1, 2], dtype=dtypes[indexer[0]])
obj2 = frame_or_series([1, 2], dtype=dtypes[indexer[1]])
msg = r'(Series|DataFrame.iloc\[:, 0\] \(column name="0"\) classes) are different'
with pytest.raises(AssertionError, match=msg):
tm.assert_equal(obj1, obj2, check_exact=True, check_dtype=False)
def test_assert_series_equal_check_like_different_indexes():
# GH#39739
df1 = DataFrame(index=pd.Index([], dtype="object"))
df2 = DataFrame(index=pd.RangeIndex(start=0, stop=0, step=1))
with pytest.raises(AssertionError, match="DataFrame.index are different"):
tm.assert_frame_equal(df1, df2, check_like=True)
| bsd-3-clause |
0x0all/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
KellyChan/Python | python/tensorflow/demos/tensorflow/concepts/conway.py | 3 | 1463 | import numpy as np
import tensorflow as tf
from scipy.signal import convolve2d
from matplotlib import pyplot as plt
import matplotlib.animation as animation
class Conway(object):
    def __init__(self):
        self.shape = (50, 50)
        self.session = tf.Session()
    def run(self):
        initial_board = self.init_board()
        # Keep the graph handles on the instance so the animation callback can reuse them.
        self.board = tf.placeholder(tf.int32, shape=self.shape, name='board')
        self.board_update = tf.py_func(self.update_board, [self.board], [tf.int32])
        initial_board_values = self.session.run(initial_board)
        self.X = self.session.run(self.board_update,
                                  feed_dict={self.board: initial_board_values})[0]
        fig = self.plot(self.X)
        # Keep a reference to the animation so it is not garbage collected.
        ani = animation.FuncAnimation(fig, self.game_of_life, interval=200, blit=True)
        plt.show()
        return self.X
    def init_board(self):
        # Random board of 0s and 1s with the configured shape.
        initial_board = tf.random_uniform(self.shape, minval=0, maxval=2, dtype=tf.int32)
        return initial_board
    def update_board(self, X):
        # Count live neighbours with a wrap-around 3x3 convolution (the cell itself is excluded).
        N = convolve2d(X, np.ones((3, 3)), mode='same', boundary='wrap') - X
        # Conway's rules: birth on exactly 3 neighbours, survival on 2 or 3.
        X = (N == 3) | (X & (N == 2))
        # Cast back to int32 so the result matches the dtype declared for tf.py_func.
        return X.astype(np.int32)
    def game_of_life(self, *args):
        # Advance the board by one generation and refresh the displayed image.
        self.X = self.session.run(self.board_update, feed_dict={self.board: self.X})[0]
        self.image.set_array(self.X)
        return self.image,
    def plot(self, X):
        # Create the figure and keep the image handle so game_of_life can update it in place.
        fig = plt.figure()
        self.image = plt.imshow(X, cmap='Greys', interpolation='nearest')
        return fig
if __name__ == '__main__':
    conway = Conway()
    X = conway.run()
| mit |
google/gps_building_blocks | py/gps_building_blocks/ml/diagnostics/bootstrap_test.py | 1 | 13558 | # Licensed under the Apache License, Version 2.0
"""Tests for gps_building_blocks.py.ml.diagnostics.bootstrap."""
import numpy as np
import pandas as pd
import pandas.util.testing as pandas_testing
from sklearn import linear_model
from absl.testing import absltest
from absl.testing import parameterized
from gps_building_blocks.ml.diagnostics import bootstrap
class BootstrapTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Prepare data
np.random.seed(42)
n_samples, n_features = 35, 70
data = np.random.randn(n_samples, n_features)
# Decreasing coefficients w. alternated signs for visualization
idx = np.arange(n_features)
coefficients = (-1) ** idx * np.exp(-idx / 10)
coefficients[10:] = 0 # sparsify
target = np.dot(data, coefficients)
# Add noise
target += 0.01 * np.random.normal(size=n_samples)
classification_target = np.where(target > np.median(target), 1, 0)
cls.data = pd.DataFrame(data)
cls.target = pd.Series(target)
cls.class_target = pd.Series(classification_target)
def test_regression_iterate(self):
elastic_net = linear_model.ElasticNet()
expected_keys = set(list(self.data.columns) + ['Intercept'])
result = bootstrap.regression_iterate(
elastic_net, self.data, self.target, seed=1)
self.assertIsInstance(result, dict)
# Assert all the keys are the DataFrame columns plus the intercept.
self.assertEqual(expected_keys, set(result))
@parameterized.named_parameters(
('undersample', 0.5),
('oversample', 2))
def test_regression_bootstrap_sample_frac(self, sample_frac):
linear = linear_model.LinearRegression()
result = bootstrap.regression_bootstrap(
self.data, self.target, linear,
regressor_cv=None,
bootstraps=5)
coef_std = result.std(axis=0).mean()
result_sampled = bootstrap.regression_bootstrap(
self.data, self.target, linear,
regressor_cv=None,
bootstraps=5,
sample_frac=sample_frac)
coef_std_sampled = result_sampled.std(axis=0).mean()
self.assertNotEqual(coef_std, coef_std_sampled)
@parameterized.named_parameters(
('negative', -0.5),
('less_than_degfreedom', 1/35))
def test_regression_bootstrap_sample_frac_valueerror(self, sample_frac):
linear = linear_model.LinearRegression()
with self.assertRaises(ValueError):
bootstrap.regression_bootstrap(
self.data, self.target, linear,
regressor_cv=None,
bootstraps=5,
sample_frac=sample_frac)
def test_regression_iterate_no_intercept(self):
elastic_net = linear_model.ElasticNet(fit_intercept=False)
expected_keys = set(self.data.columns)
result = bootstrap.regression_iterate(
elastic_net, self.data, self.target, seed=1)
self.assertIsInstance(result, dict)
# Assert all the keys are the DataFrame columns.
self.assertEqual(expected_keys, set(result))
def test_regression_iterate_seed(self):
elastic_net = linear_model.ElasticNet(random_state=123)
expected_result = bootstrap.regression_iterate(
elastic_net, self.data, self.target, seed=1)
result = bootstrap.regression_iterate(
elastic_net, self.data, self.target, seed=1)
self.assertIsInstance(result, dict)
self.assertEqual(result, expected_result)
@parameterized.named_parameters(
('default_regressor_default_regressor_cv', bootstrap.regressor_default(),
bootstrap.regressor_cv_default().set_params(cv=3, n_alphas=10), 5, 1),
('default_regressor_none_regressor_cv', bootstrap.regressor_default(),
None, 5, 1),
('linear_regressor_none_regressor_cv', linear_model.LinearRegression(),
None, 5, 1),
('inner_regressor_cv',
bootstrap.regressor_cv_default().set_params(cv=3, n_alphas=10), None, 5,
1),
('elastic_net_cv_multiproc', linear_model.ElasticNet(),
bootstrap.regressor_cv_default().set_params(cv=3, n_alphas=10), 5, -1),
('elastic_net_cv_multiproc_4cpus', linear_model.ElasticNet(),
linear_model.ElasticNetCV(cv=3), 5, 4),
('elastic_net_cv', linear_model.ElasticNet(),
linear_model.ElasticNetCV(cv=3), 5, 1),
('elastic_net_multiproc', linear_model.ElasticNet(), None, 5, -1),
('ridge', linear_model.Ridge(), None, 5, 1),
('ridge_multiproc', linear_model.Ridge(), None, 5, -1),
('ridge_multiproc_10_bootstraps', linear_model.Ridge(), None, 10, -1),
('lasso', linear_model.Lasso(), None, 5, 1),
('lasso_multiproc', linear_model.Lasso(), None, 5, -1),
('lars', linear_model.Lars(n_nonzero_coefs=5), None, 5, 1),
('lars_multiproc', linear_model.Lars(n_nonzero_coefs=5), None, 5, -1))
def test_regression_bootstrap(
self, regressor, regressor_cv, bootstraps, n_jobs):
result = bootstrap.regression_bootstrap(
data=self.data,
target=self.target,
regressor=regressor,
regressor_cv=regressor_cv,
bootstraps=bootstraps,
n_jobs=n_jobs,
verbose=False)
self.assertIsInstance(result, pd.DataFrame)
self.assertLen(result, bootstraps) # Same rows as many bootstraps
self.assertEqual(result.shape[1], self.data.shape[1]+1)
def test_classification_bootstrap(self):
ridge_class = linear_model.RidgeClassifier()
ridge_class_cv = linear_model.RidgeClassifierCV()
result = bootstrap.regression_bootstrap(
data=self.data,
target=self.class_target,
regressor=ridge_class,
regressor_cv=ridge_class_cv,
verbose=False,
bootstraps=5)
self.assertIsInstance(result, pd.DataFrame)
self.assertEqual(result.shape[1], self.data.shape[1]+1)
@parameterized.named_parameters(
('linear_regressor_elastic_net_regressor_cv',
linear_model.LinearRegression(), bootstrap.regressor_cv_default(), 5, 1),
('ridge_regressor_elastic_net_regressor_cv',
linear_model.Ridge(), bootstrap.regressor_cv_default(), 5, 1),
('lassolars_regressor_elastic_net_regressor_cv',
linear_model.LassoLars(), bootstrap.regressor_cv_default(), 5, 1))
def test_regression_bootstrap_mismatch_regressor_cv(
self, regressor, regressor_cv, bootstraps, n_jobs):
with self.assertRaises(ValueError):
bootstrap.regression_bootstrap(
data=self.data,
target=self.target,
regressor=regressor,
regressor_cv=regressor_cv,
bootstraps=bootstraps,
n_jobs=n_jobs,
verbose=False)
@parameterized.named_parameters(
('elasticnet_elasticnet', linear_model.ElasticNet(),
linear_model.ElasticNet()),
('none_elasticnet', None, linear_model.ElasticNet()))
def test_regression_bootstrap_unsupported_regressor_cv(
self, regressor, regressor_cv):
with self.assertRaises(NotImplementedError):
bootstrap.regression_bootstrap(
self.data,
self.target,
regressor=regressor,
regressor_cv=regressor_cv)
@parameterized.named_parameters(
('elasticnetcv_elasticnetcv', linear_model.ElasticNetCV(),
linear_model.ElasticNetCV()),
('ridgecv_ridgecv', linear_model.RidgeCV(), linear_model.RidgeCV()),
('ridgecv_elasticnetcv', linear_model.RidgeCV(),
linear_model.ElasticNetCV()))
def test_regression_bootstrap_runtime_error(
self, regressor, regressor_cv):
with self.assertRaises(RuntimeError):
bootstrap.regression_bootstrap(
self.data,
self.target,
regressor=regressor,
regressor_cv=regressor_cv)
def test_regression_not_numeric_index(self):
"""Makes sure that regression_iterate handles non numeric indexing."""
elastic_net = linear_model.ElasticNet(random_state=123)
data = self.data.copy()
target = self.target.copy()
# Convert index to string for `data` and `target`
data.index = [f'data_{index}' for index in data.index]
target.index = [f'test_{index}' for index in target.index]
expected_result = bootstrap.regression_iterate(
elastic_net, self.data, self.target, seed=1)
result = bootstrap.regression_iterate(
elastic_net, data, target, seed=1)
self.assertIsInstance(result, dict)
self.assertEqual(result, expected_result)
def test_regression_bootstrap_sampled_hyperpar_tune(self):
"""Compares the single and multi hyperparameter tuning."""
# Single hyperparameter tune prior to bootstrapping.
kwargs = {'data': self.data,
'target': self.target,
'bootstraps': 5}
elastic_net = linear_model.ElasticNet(random_state=1)
elastic_net_cv = linear_model.ElasticNetCV(random_state=10, cv=3)
outer_tune = bootstrap.regression_bootstrap(
regressor=elastic_net, regressor_cv=elastic_net_cv, **kwargs)
outer_coef_std = outer_tune.std(axis=0).mean()
# Hyperparameters re-tuned on every bootstrap sample.
elastic_net = linear_model.ElasticNetCV(random_state=10, cv=3)
elastic_net_cv = None
outer_inner_tune = bootstrap.regression_bootstrap(
regressor=elastic_net, regressor_cv=elastic_net_cv, **kwargs)
outer_inner_coef_std = outer_inner_tune.std(axis=0).mean()
# Confirm that running separate instances gives same results for single
# tune. This is identical setup to outer_tune.
elastic_net = linear_model.ElasticNet(random_state=1)
elastic_net_cv = linear_model.ElasticNetCV(random_state=10, cv=3)
outer_tune2 = bootstrap.regression_bootstrap(
regressor=elastic_net, regressor_cv=elastic_net_cv, **kwargs)
outer2_coef_std = outer_tune2.std(axis=0).mean()
self.assertNotEqual(outer_coef_std, outer_inner_coef_std)
self.assertEqual(outer_coef_std, outer2_coef_std)
def test_resample_without_replacement(self):
"""Ensures sampling without replacement is working as intended."""
resampled_data, resampled_target = bootstrap.resample(
self.data, self.target, replacement=False)
# Make sure there are no duplicate rows. `nunique` returns the numbers of
# unique rows in a series.
self.assertEqual(resampled_data.index.nunique(), len(self.data))
# Make sure data and target index match
self.assertListEqual(
resampled_data.index.tolist(), resampled_target.index.tolist())
def test_resample_with_replacement(self):
"""Ensures sampling with replacement is working as intended."""
resampled_data, _ = bootstrap.resample(
self.data, self.target, replacement=True)
# Make sure there are duplicate rows as we're using replacement
self.assertLess(resampled_data.index.nunique(), len(self.data))
def test_resample_replacement_oversample(self):
"""Ensures oversampling without replacement is not allowed."""
with self.assertRaises(ValueError):
_ = bootstrap.resample(
self.data, self.target, sample_frac=2, replacement=False)
def test_regression_iterate_randomize_target(self):
"""Ensures the target randomization delivers different results."""
kw = {'regressor': linear_model.Ridge(),
'data': self.data,
'target': self.target}
bootstrapped_results = bootstrap.regression_iterate(
randomize_target=False, **kw)
randomized_results = bootstrap.regression_iterate(
randomize_target=True, **kw)
# Checks randomize and bootstrap results have same keys.
self.assertEqual(randomized_results.keys(), bootstrapped_results.keys())
# But they have different data.
self.assertNotEqual(
randomized_results.values(), bootstrapped_results.values())
def test_regression_bootstrap_without_replacement(self):
"""Compares results with and without replacement."""
kwargs = {'data': self.data,
'target': self.target,
'regressor': linear_model.Ridge(),
'regressor_cv': None,
'sample_frac': 0.8,
'bootstraps': 5}
replacement_result = bootstrap.regression_bootstrap(
replacement=True, **kwargs)
without_replacement_result = bootstrap.regression_bootstrap(
replacement=False, **kwargs)
# Results should be different as data has been resampled differently.
self.assertFalse(replacement_result.equals(without_replacement_result))
@parameterized.named_parameters(
('with_intercept', linear_model.Ridge(fit_intercept=True), 5),
('without_intercept', linear_model.Ridge(fit_intercept=False), 5))
def test_permutation_test(self, regressor, n_permutations):
"""Ensure the permutation test works as intended."""
feature_names = self.data.columns.tolist()
if regressor.fit_intercept:
feature_names.append('Intercept')
permutation_results = bootstrap.permutation_test(
data=self.data, target=self.target, regressor=regressor,
n_permutations=n_permutations, n_jobs=1, verbose=False)
self.assertLen(permutation_results, n_permutations)
self.assertListEqual(permutation_results.columns.tolist(), feature_names)
def test_permutation_test_seed(self):
"""Ensures the permutation results are reproducible and seed works."""
kw = {
'data': self.data, 'target': self.target,
'regressor': linear_model.Ridge(), 'n_permutations': 3,
'n_jobs': 1, 'verbose': False}
first_results = bootstrap.permutation_test(**kw)
second_results = bootstrap.permutation_test(**kw)
pandas_testing.assert_frame_equal(first_results, second_results)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
KarchinLab/2020plus | src/features/python/feature_utils.py | 1 | 13605 | import pandas.io.sql as psql
import src.utils.python.util as _utils
import numpy as np
import pandas as pd
import logging
import sys
import sqlite3  # needed by wrapper_retrieve_gene_features below
logger = logging.getLogger(__name__)
def process_features(df):
"""Processes mutation consequence types from probabilistic 20/20.
"""
# rename column headers
rename_dict = {'silent snv': 'silent'}
df = df.rename(columns=rename_dict)
# get nonsilent/silent
nonsilent_to_silent = (df['non-silent snv']+df['inframe indel']+df['frameshift indel']).astype(float)/(df['silent']+1)
###
# process score information
###
# process conservation info for missense mutations
if 'Total Missense MGAEntropy' in df.columns:
num_mis = df['missense'].copy()
#num_mis[num_mis==0] = 1
df['Mean Missense MGAEntropy'] = np.nan
df.loc[num_mis!=0, 'Mean Missense MGAEntropy'] = df['Total Missense MGAEntropy'][num_mis!=0] / num_mis[num_mis!=0]
df['Mean Missense MGAEntropy'] = df['Mean Missense MGAEntropy'].fillna(df['Mean Missense MGAEntropy'].max())
del df['Total Missense MGAEntropy']
# calculate the mean VEST score
if 'Total Missense VEST Score' in df.columns:
sum_cols = ['Total Missense VEST Score', 'lost stop',
'lost start', 'splice site', 'frameshift indel', 'inframe indel',
'nonsense']
all_muts = ['non-silent snv', 'silent', 'inframe indel', 'frameshift indel']
tot_vest_score = df[sum_cols].sum(axis=1).astype(float)
num_muts = df[all_muts].sum(axis=1).astype(float)
df['Mean VEST Score'] = tot_vest_score / num_muts
df.loc[np.isinf(df['Mean VEST Score']), 'Mean VEST Score'] = np.nan # hack to prevent infinity
#df['Missense VEST Score'] = df['Total Missense VEST Score'] / num_muts
#df['VEST normalized missense position entropy'] = df['normalized missense position entropy'] * (1.-df['Missense VEST Score'])
del df['Total Missense VEST Score']
# drop id col
df = df.drop(['ID', 'non-silent snv'], axis=1)
# handle mutation counts
count_cols = ['silent', 'nonsense', 'lost stop', 'lost start', 'missense',
'recurrent missense', 'splice site', 'inframe indel', 'frameshift indel']
mycounts = df[count_cols]
df['missense'] -= df['recurrent missense']
# calculate features
miss_to_silent = (df['missense']+df['recurrent missense']).astype(float)/(df['silent']+1)
# normalize out of total mutations
total_cts = df[count_cols].sum(axis=1)
norm_cts = mycounts.div(total_cts.astype(float), axis=0)
norm_cts = norm_cts.fillna(0.0)
# combine lost stop and lost start
lost_start_stop = norm_cts['lost stop'] + norm_cts['lost start']
df = df.drop(['lost start', 'lost stop'], axis=1)
norm_cts = norm_cts.drop(['lost start', 'lost stop'], axis=1)
count_cols.pop(3)
count_cols.pop(2)
df[count_cols] = norm_cts
df['lost start and stop'] = lost_start_stop
df['missense to silent'] = miss_to_silent
df['non-silent to silent'] = nonsilent_to_silent
return df
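# Hypothetical usage sketch (not part of the original module): process_features
# expects the summarized count/score columns produced upstream by probabilistic
# 20/20 (e.g. 'silent snv', 'non-silent snv', 'missense', 'Total Missense VEST
# Score', ...); the file name below is purely illustrative.
#
#   >>> raw_feature_df = pd.read_csv('summary_features.txt', sep='\t')
#   >>> processed_df = process_features(raw_feature_df)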
def label_gene(gene,
oncogene=True,
tsg=True,
               kind='onco_tsg'):
    """Label a gene according to the lists of oncogenes
    and tumor suppressor genes (TSGs)."""
# set integer representation of classes
other_num = _utils.other_label
if oncogene: onco_num = _utils.onco_label
if tsg: tsg_num = _utils.tsg_label if oncogene else _utils.onco_label
smg_num = 1
# classify genes
if kind == 'onco_tsg':
if gene in _utils.oncogene_set:
return onco_num
elif gene in _utils.tsg_set:
return tsg_num
else:
return other_num
elif kind == 'smg':
if gene in _utils.smg_list:
return smg_num
else:
return other_num
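# Hypothetical usage sketch (not part of the original module): assuming the gene
# appears in the training lists held by _utils, label_gene maps a gene symbol to
# the integer class label used elsewhere in 2020plus.
#
#   >>> label_gene('KRAS')               # _utils.onco_label if KRAS is in the oncogene set
#   >>> label_gene('TP53')               # _utils.tsg_label if TP53 is in the TSG set
#   >>> label_gene('MYH7', kind='smg')   # 1 if in _utils.smg_list, else _utils.other_label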
def randomize(df, prng=None):
    """Randomly shuffles the feature matrix and generates the matching "true" class labels.
Calls random_sort to do the random shuffling.
Parameters
----------
df : pd.DataFrame
contains features for training
prng : np.random.RandomState
pseudo random number generator state
Returns
-------
x : pd.DataFrame
training features
y : pd.Series
true class labels
"""
x = random_sort(df, prng) # randomly sort data
y = x.index.to_series().apply(label_gene) # get gene labels
return x, y
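# Hypothetical usage sketch (not part of the original module): feature_df is
# assumed to be a gene-indexed feature matrix such as the output of
# process_features; passing a seeded RandomState makes the shuffle reproducible.
#
#   >>> prng = np.random.RandomState(42)
#   >>> x, y = randomize(feature_df, prng)   # shuffled features and matching labels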
def random_sort(df, prng=None):
"""Randomly shuffle a DataFrame.
NOTE: if the training data is not randomly shuffled, then
supervised learning may find artifacts related to the order
of the data.
Parameters
----------
df : pd.DataFrame
dataframe with feature information
Returns
-------
df : pd.DataFrame
Randomly shuffled data frame
"""
# get new random state if not specified
if prng is None:
prng = np.random.RandomState()
# get random order
random_indices = prng.choice(df.index.values, # sample from 'genes'
len(df), # number of samples
replace=False) # sample without replacement
# change order of df
random_df = df.loc[random_indices].copy()
return random_df
def check_num_classes(class_labels):
class_cts = class_labels.value_counts()
num_sufficient_uniq = (class_cts > 5).sum()
if num_sufficient_uniq < 3:
sys.exit('ERROR: There were either no or very few mutated oncogenes or tumor suppressor genes '
'found in your data! Did you supply a full pan-cancer dataset? '
'Or have you modified the training list of oncogenes or '
'tumor suppressor genes? Or did you subset your mutations to not include oncogenes/tumor suppressor genes in the training list?')
############################################
# Old feature processing functions
############################################
#import src.data_analysis.python.feature_matrix as fmat
#import src.data_analysis.python.position_entropy as pentropy
def retrieve_gene_features(conn, opts,
get_entropy=True):
"""Retrieve gene information from the gene_features table.
See the gene_features module to understand the gene_features
database table.
Parameters
----------
conn : mysql/sqlite connection
connection to db with gene_features table
options : dict
options for getting info
get_entropy : bool
        option to toggle the use of entropy features.
        Since entropy features are read from a file in this function, it may
        introduce an unnecessary dependency on previously run commands.
To avoid this, set get_entropy=False and then compute entropy features
separately.
Returns
-------
df : pd.dataframe
        data frame of the selected per-gene covariate features
"""
logger.info('Retrieving features of genes . . .')
selected_cols = ['gene']
# retrieve more features if specified by command line
if opts['gene_length']:
selected_cols.append('gene_length')
if opts['mutation_rate']:
selected_cols.append('noncoding_mutation_rate')
if opts['replication_time']:
selected_cols.append('replication_time')
if opts['expression']:
selected_cols.append('expression_CCLE as expression')
if opts['hic']:
selected_cols.append('HiC_compartment')
if opts['betweeness']:
selected_cols.append('gene_betweeness')
if opts['degree']:
selected_cols.append('gene_degree')
# get info from gene_features table
logger.info('Retrieving gene feature information from gene_features table . . . ')
sql = "SELECT %s FROM gene_features" % ', '.join(selected_cols)
df = psql.frame_query(sql, conn)
df = df.set_index('gene')
df['gene'] = df.index
logger.info('Finished retrieving gene features from gene_features table.')
# fill graph stats with zeros if gene not in Biogrid
if 'gene_betweeness' in df.columns:
df['gene_betweeness'] = df['gene_betweeness'].fillna(0)
if 'gene_degree' in df.columns:
df['gene_degree'] = df['gene_degree'].fillna(0)
# get position entropy features
if get_entropy:
entropy_cfg = _utils.get_output_config('position_entropy')
mutation_pos_entropy = pd.read_csv(_utils.result_dir + entropy_cfg['mutation_pos_entropy'],
sep='\t', index_col=0)
missense_pos_entropy = pd.read_csv(_utils.result_dir + entropy_cfg['missense_pos_entropy'],
sep='\t', index_col=0)
#df['mutation position entropy'] = mutation_pos_entropy['mutation position entropy']
#df['pct of uniform mutation entropy'] = mutation_pos_entropy['pct of uniform mutation entropy']
df['missense position entropy'] = missense_pos_entropy['missense position entropy']
df['pct of uniform missense entropy'] = missense_pos_entropy['pct of uniform missense entropy']
return df
def wrapper_retrieve_gene_features(opts):
"""Wrapper arround the retrieve_gene_features function in the
features module.
Parameters
----------
opts : dict
command line options
Returns
-------
additional_features : pd.DataFrame
"""
# get additional features
db_cfg = _utils.get_db_config('2020plus')
conn = sqlite3.connect(db_cfg['db'])
additional_features = retrieve_gene_features(conn, opts, get_entropy=False)
conn.close()
return additional_features
def _filter_rows(df, min_ct=0):
"""Filter out rows with counts less than the minimum."""
row_sums = df.T.sum()
filtered_df = df[row_sums >= min_ct]
return filtered_df
def normalize_mutational_features(df, min_count):
"""Normalizes mutation type counts and aggregate counts into
recurrent vs deleterious.
Parameters
----------
df : pd.DataFrame
data frame with gene names and mutation counts for each type
min_count : int
minimum number of mutations for a gene to be used
"""
if 'gene' in df.columns:
df = df.set_index('gene') # hack to prevent dividing genes by a number
df = _filter_rows(df, min_ct=min_count) # drop rows below minimum total mutations
recurrent_mutation = df['recurrent missense'] # + df['recurrent indel']
#deleterious_mutation = df['lost stop'] + df['nonsense'] + df['frame shift'] + df['no protein'] + df['splicing mutation']
deleterious_mutation = df['Nonstop_Mutation+Translation_Start_Site'] + df['Nonsense_Mutation'] + df['Frame_Shift_Indel'] + df['Splice_Site']
missense_to_silent = df['Missense_Mutation'] / (df['Silent']+1).astype(float)
row_sums = df.sum(axis=1).astype(float)
df = df.div(row_sums-recurrent_mutation, axis=0) # normalize each row
df['recurrent count'] = recurrent_mutation
df['deleterious count'] = deleterious_mutation
#df['total'] = row_sums
df['gene'] = df.index # get back the gene column from the index
return df
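# Illustrative sketch (not part of the original module): normalize_mutational_features
# expects the mutation-type count columns referenced above; the single-gene counts
# below are hypothetical.
def _example_normalize_mutational_features():
    toy = pd.DataFrame({'gene': ['GENE_X'],
                        'recurrent missense': [2],
                        'Missense_Mutation': [5],
                        'Silent': [1],
                        'Nonstop_Mutation+Translation_Start_Site': [0],
                        'Nonsense_Mutation': [1],
                        'Frame_Shift_Indel': [1],
                        'Splice_Site': [0]})
    return normalize_mutational_features(toy, min_count=0)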
def generate_features(mutation_df, opts,
covariate_features=None):
"""Main function that generates features.
Parameters
----------
mutation_df : pd.DataFrame
data frame containing mutations from maf/sqlitedb
opts : dict
dictionary containing the command line options.
opts is not necessary if covariate_features
is specified.
covariate_features : pd.DataFrame (Default: None)
if covariate data frame already obtained, then utilize
        that as input. Otherwise, retrieve it from the sqlite database.
Returns
-------
all_features : pd.DataFrame
features with both mutational type features and covariate features
"""
if type(covariate_features) is not pd.DataFrame:
covariate_features = wrapper_retrieve_gene_features(opts)
mutational_features = process_mutational_features(mutation_df)
all_features = pd.merge(mutational_features, covariate_features,
how='left', on='gene')
return all_features
def process_mutational_features(mydf):
"""Performs feature processing pipeline.
Parameters
----------
mydf : pd.DataFrame
data frame containing the desired raw data for computation of
features for classifier
Returns
-------
proc_feat_df: pd.DataFrame
dataframe consisting of features for classification
"""
    # rename columns to ensure compatibility with previously
    # written code
mydf = mydf.rename(columns={'Protein_Change': 'AminoAcid',
'DNA_Change': 'Nucleotide'})
# process features
feat_list = fmat.generate_feature_matrix(mydf, 2)
headers = feat_list.pop(0) # remove header row
feat_df = pd.DataFrame(feat_list, columns=headers) # convert to data frame
proc_feat_df = normalize_mutational_features(feat_df, 0)
miss_ent_df = pentropy.missense_position_entropy(mydf[['Gene', 'AminoAcid']])
# mut_ent_df = pentropy.mutation_position_entropy(mydf[['Gene', 'AminoAcid']])
    # incorporate entropy features
#proc_feat_df['mutation position entropy'] = mut_ent_df['mutation position entropy']
#proc_feat_df['pct of uniform mutation entropy'] = mut_ent_df['pct of uniform mutation entropy']
proc_feat_df['missense position entropy'] = miss_ent_df['missense position entropy']
proc_feat_df['pct of uniform missense entropy'] = miss_ent_df['pct of uniform missense entropy']
return proc_feat_df
| apache-2.0 |
edhuckle/statsmodels | statsmodels/examples/ex_generic_mle.py | 32 | 16462 |
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
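# Note: with q = 2*endog - 1 mapping {0, 1} labels to {-1, +1}, the probit
# log-likelihood is sum_i log Phi(q_i * x_i'beta); the np.add.reduce over
# stats.norm.logcdf above computes exactly that sum.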
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# An instance of GenericLikelihoodModel doesn't work directly here, because loglike
# cannot get access to the data in self.endog, self.exog
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1#False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print(res_norm.params)
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000,
fargs=(datal.endog, datal.exog))
print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
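# Note: loglike is simply the sum of loglikeobs; providing the per-observation
# version lets GenericLikelihoodModel form quantities such as score_obs numerically.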
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
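# Averaging the backward (epsilon=-1e-4) and forward (epsilon=1e-4) Hessian
# approximations mimics a central-difference estimate and cancels the leading
# error terms of the one-sided approximations.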
print(np.linalg.eigh(hh))
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
| bsd-3-clause |
tvwenger/kd | setup.py | 1 | 1077 | """
Copyright(C) 2017-2020 by
Trey V. Wenger; [email protected]
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup
setup(
name='kd',
version='2.0',
description='Kinematic distance utilities',
author='Trey V. Wenger',
author_email='[email protected]',
packages=['kd'],
install_requires=['numpy', 'matplotlib', 'scipy', 'pathos'],
package_data={'kd':['curve_data_wise_small.sav', 'reid19_params.pkl']},
)
| gpl-3.0 |
depet/scikit-learn | sklearn/linear_model/randomized_l1.py | 8 | 22876 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, safe_asarray,
check_arrays, safe_mask)
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set.astype(np.float)
scores_ /= n_resampling
return scores_
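# Illustrative sketch (not part of the original module): estimator_func must accept
# (X, y, weights=..., mask=..., verbose=..., **params) and return a boolean array
# marking the features selected on one resampling; the returned scores_ are then
# selection frequencies in [0, 1]. The toy selector below is a hypothetical
# stand-in for the _randomized_lasso / _randomized_logistic helpers defined later.
def _toy_selector(X, y, weights, mask, verbose=False, threshold=0.1):
    X_sub = X[safe_mask(X, mask)] * (1 - weights)
    y_sub = y[mask]
    corr = np.abs(np.dot(X_sub.T, y_sub)) / len(y_sub)
    return corr > threshold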
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy of Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_arrays(X, y)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'])(
estimator_func, X, y,
scaling=self.scaling,
n_resampling=self.n_resampling,
n_jobs=self.n_jobs,
verbose=self.verbose,
pre_dispatch=self.pre_dispatch,
random_state=self.random_state,
sample_fraction=self.sample_fraction,
**params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return safe_asarray(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
Attributes
----------
`scores_` : array, shape = [n_features]
Feature scores between 0 and 1.
`all_scores_` : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
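# Illustrative sketch (not part of the original module): typical stability-selection
# usage of RandomizedLasso on synthetic data; the shapes and settings are hypothetical.
def _example_randomized_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    clf = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    clf.fit(X, y)
    return clf.scores_, clf.get_support(indices=True)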
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
    selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
Attributes
----------
`scores_` : array, shape = [n_features]
Feature scores between 0 and 1.
`all_scores_` : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
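# Illustrative sketch (not part of the original module): RandomizedLogisticRegression
# used as a feature selector on synthetic binary labels; shapes and C are hypothetical.
def _example_randomized_logistic():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = (X[:, 0] + 0.5 * rng.randn(100) > 0).astype(int)
    clf = RandomizedLogisticRegression(C=1.0, n_resampling=50, random_state=0)
    X_selected = clf.fit(X, y).transform(X)
    return clf.scores_, X_selected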
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
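# Illustrative sketch (not part of the original module): computing a stability path
# on synthetic data; the shapes below are hypothetical.
def _example_stability_path():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - X[:, 1] + 0.1 * rng.randn(60)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                   n_resampling=50)
    # scores_path[j, k] is the selection frequency of feature j at alpha_grid[k]
    return alpha_grid, scores_path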
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
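# Note: RBFSampler approximates the RBF kernel with random Fourier features
# (Rahimi & Recht), so the Monte Carlo error checked in test_rbf_sampler shrinks
# as n_components grows.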
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
adit-chandra/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 3 | 30720 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
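# Note: TestSequence is a minimal Keras Sequence: indexing yields one
# (features, labels) batch and len() gives the number of batches, e.g.
# TestSequence(batch_size=5, feature_shape=10)[0] returns arrays of shape
# (5, 10) and (5,).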
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_iterator_expect_batch_size_numpy(self):
with self.assertRaisesRegexp(
ValueError, r'`batch_size` or `steps` is required'):
self.adapter_cls(self.numpy_input, self.numpy_target)
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
input_c = keras.Input(shape=(1,), name='input_b')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that individual batches are unshuffled
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GenericArrayLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(
self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
# Because adapters are mutually exclusive, don't handle cases
# where all the data is numpy or an eagertensor
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input,
self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input,
self.arraylike_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.numpy_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.tensor_target))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_iterator_expect_batch_size_generic_arraylike(self):
with self.assertRaisesRegexp(
ValueError, r'`batch_size` or `steps` is required'):
self.adapter_cls(self.arraylike_input,
self.arraylike_target)
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it into
# memory), only the sliced data should be converted.
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle=True, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle='batch', batch_size=5)
self.model.evaluate(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.predict(self.arraylike_input, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.numpy_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.numpy_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.tensor_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.tensor_target, batch_size=5)
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch is shuffled contiguous data
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that individual batches are unshuffled
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target,
batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super(DatasetAdapterTest, self).setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GeneratorDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.generator_input, steps_per_epoch=10)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input)
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super(KerasSequenceAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
lin-credible/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 103 | 41083 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
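# Illustrative note (not part of the original test suite): with the
# divide-by-zero guard above, zero distances map to ``inf`` weights instead
# of raising, e.g. ``_weight_func(np.array([0.0, 2.0]))`` evaluates to
# ``[inf, 0.25]``, matching ``d ** -2`` for nonzero ``d``.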
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:  # disabled branch; outlier relabeling is exercised in the next test
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
dist_array.sort()
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et al. when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et al. when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
cfobel/zmq_helpers | zmq_helpers/utils.py | 1 | 5151 | import urllib2
import sys
import logging
from uuid import uuid4
import zmq
from path_helpers import path
# URL for service names and port numbers, as defined in Internet Engineering
# Task Force (IETF) [RFC6335][1], which:
#
# > defines the procedures that the Internet Assigned Numbers Authority
# > (IANA) uses when handling assignment and other requests related to
# > the Service Name and Transport Protocol Port Number registry".
#
# [1]: http://tools.ietf.org/html/rfc6335
IANA_LIST_URL = ('http://www.iana.org/assignments/'
'service-names-port-numbers/'
'service-names-port-numbers.csv')
def bind_to_random_port(sock, port_ranges=None):
if port_ranges is None:
port_ranges = get_unassigned_port_ranges()
for i, port_range in port_ranges.iterrows():
for port in xrange(port_range.start, port_range.end + 1):
try:
sock.bind('tcp://*:%d' % port)
return port
except zmq.ZMQError:
pass
raise
def get_unassigned_port_ranges(csv_path=None):
import pandas as pd
if csv_path is None:
import pkg_resources
base_path = path(pkg_resources.resource_filename('zmq_helpers', ''))
# Load cached unassigned port ranges, as defined in Internet
# Engineering Task Force (IETF) [RFC6335][1], which:
#
# > defines the procedures that the Internet Assigned Numbers Authority
# > (IANA) uses when handling assignment and other requests related to
# > the Service Name and Transport Protocol Port Number registry".
#
# [1]: http://tools.ietf.org/html/rfc6335
cache_path = base_path.joinpath('static',
'service-names-port-numbers.h5')
return pd.read_hdf(str(cache_path), '/unassigned_ranges')
df = pd.read_csv(csv_path)
unassigned = df.loc[df.Description.str.lower() == 'unassigned'].copy()
port_ranges = pd.DataFrame([[int(x), int(x)]
if '-' not in x else map(int, x.split('-'))
for x in unassigned['Port Number']],
columns=['start', 'end'])
port_ranges['count'] = port_ranges.end - port_ranges.start
port_ranges['inv_mod'] = 10 - (port_ranges['start'] % 10)
port_ranges.sort(['inv_mod', 'count'], inplace=True)
return port_ranges.drop('inv_mod', axis=1)[::-1].reset_index(drop=True)
def log_label(obj=None, function_name=True):
parts = []
if obj:
parts += [obj.__class__.__module__, obj.__class__.__name__]
if function_name:
parts += [callersname()]
if parts[0] == '__main__':
del parts[0]
return '[%s]' % '.'.join(parts)
def unique_ipc_uri():
return 'ipc:///tmp/' + uuid4().hex
def cleanup_ipc_uris(uris):
for uri in uris:
if uri.startswith('ipc://'):
uri = path(uri.replace('ipc://', ''))
if uri.exists():
logging.getLogger('[utils.cleanup_ipc_uris]').debug('remove: %s' % uri)
uri.remove()
## {{{ http://code.activestate.com/recipes/66062/ (r1)
# use sys._getframe() -- it returns a frame object, whose attribute
# f_code is a code object, whose attribute co_name is the name:
def whoami():
return sys._getframe(1).f_code.co_name
# this uses argument 1, because the call to whoami is now frame 0.
# and similarly:
def callersname():
return sys._getframe(2).f_code.co_name
def test_port(addr, port=None):
import socket
from random import randint
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port is None:
port = randint(1024, 65535)
s.bind((addr, port))
return port
def get_available_ports(count=1, interface_addr='*', exclude=None):
from random import randint
if exclude is None:
exclude = []
exclude = set(exclude)
ports = []
if interface_addr == '*':
addr = 'localhost'
else:
addr = interface_addr
preferred_ports_path = path('~/.zmq_helpers/preferred_ports.txt').expand()
preferred_ports = []
if preferred_ports_path.isfile():
preferred_ports += map(int, preferred_ports_path.lines())
while len(ports) < count:
try:
if preferred_ports:
port = preferred_ports.pop(0)
else:
port = randint(1024, 65535)
if port not in exclude:
port = test_port(addr, port)
ports.append(port)
exclude.add(port)
except (Exception, ), e:
#import traceback
#traceback.print_exc()
pass
return ports
def get_random_tcp_uris(addr, count=1, exclude_ports=None):
if exclude_ports is None:
exclude_ports = []
ports = get_available_ports(count, addr, exclude_ports)
return ['tcp://%s:%s' % (addr, port) for port in ports]
def get_random_tcp_uri(addr, exclude_ports=None):
return get_random_tcp_uris(addr, exclude_ports=exclude_ports)[0]
def get_public_ip():
return urllib2.urlopen('http://myip.dnsomatic.com').read()
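# Illustrative usage sketch (not part of the original module). It assumes the
# cached IANA port-range data bundled with the package is present, so that
# bind_to_random_port() can draw from the unassigned ranges; the socket type
# and log messages below are arbitrary examples.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    context = zmq.Context.instance()
    sock = context.socket(zmq.REP)
    # Bind to a TCP port drawn from the IANA "unassigned" ranges.
    port = bind_to_random_port(sock)
    logging.info('bound REP socket to tcp://*:%d', port)
    # Generate a throw-away IPC endpoint name and clean it up afterwards.
    ipc_uri = unique_ipc_uri()
    logging.info('ephemeral IPC URI: %s', ipc_uri)
    cleanup_ipc_uris([ipc_uri])
    sock.close()
    context.term()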
| gpl-3.0 |
Heerozh/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
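# Minimal usage sketch (not part of the original helper). It assumes the
# TF1-era MNIST loader from tensorflow.examples is available; the layer
# shapes match the example network in _get_loss_acc, and the titles and
# labels below are illustrative only.
if __name__ == '__main__':
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    layer_dims = [(784, 256), (256, 128), (128, 10)]
    normal_weights = [tf.Variable(tf.random_normal(dim)) for dim in layer_dims]
    truncated_weights = [tf.Variable(tf.truncated_normal(dim, stddev=0.1))
                         for dim in layer_dims]
    # Visualize one of the candidate distributions.
    hist_dist('Random Normal (stddev=1.0)', tf.random_normal([1000]))
    # Compare training behavior of the two initializations.
    compare_init_weights(
        mnist,
        'Random Normal vs Truncated Normal',
        [(normal_weights, 'Random Normal stddev=1.0'),
         (truncated_weights, 'Truncated Normal stddev=0.1')])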
| mit |
majetideepak/arrow | python/examples/flight/client.py | 2 | 5376 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An example Flight CLI client."""
import argparse
import sys
import pyarrow
import pyarrow.flight
def list_flights(args, client):
print('Flights\n=======')
for flight in client.list_flights():
descriptor = flight.descriptor
if descriptor.descriptor_type == pyarrow.flight.DescriptorType.PATH:
print("Path:", descriptor.path)
elif descriptor.descriptor_type == pyarrow.flight.DescriptorType.CMD:
print("Command:", descriptor.command)
else:
print("Unknown descriptor type")
print("Total records:", end=" ")
if flight.total_records >= 0:
print(flight.total_records)
else:
print("Unknown")
print("Total bytes:", end=" ")
if flight.total_bytes >= 0:
print(flight.total_bytes)
else:
print("Unknown")
print("Number of endpoints:", len(flight.endpoints))
if args.list:
print(flight.schema)
print('---')
print('\nActions\n=======')
for action in client.list_actions():
print("Type:", action.type)
print("Description:", action.description)
print('---')
def do_action(args, client):
try:
buf = pyarrow.allocate_buffer(0)
action = pyarrow.flight.Action(args.action_type, buf)
print('Running action', args.action_type)
for result in client.do_action(action):
print("Got result", result.body.to_pybytes())
except pyarrow.lib.ArrowIOError as e:
print("Error calling action:", e)
def get_flight(args, client):
if args.path:
descriptor = pyarrow.flight.FlightDescriptor.for_path(*args.path)
else:
descriptor = pyarrow.flight.FlightDescriptor.for_command(args.command)
info = client.get_flight_info(descriptor)
for endpoint in info.endpoints:
print('Ticket:', endpoint.ticket)
for location in endpoint.locations:
print(location)
get_client = pyarrow.flight.FlightClient.connect(location)
reader = get_client.do_get(endpoint.ticket)
df = reader.read_pandas()
print(df)
def _add_common_arguments(parser):
parser.add_argument('--tls', action='store_true')
parser.add_argument('--tls-roots', default=None)
parser.add_argument('host', type=str,
help="The host to connect to.")
def main():
parser = argparse.ArgumentParser()
subcommands = parser.add_subparsers()
cmd_list = subcommands.add_parser('list')
cmd_list.set_defaults(action='list')
_add_common_arguments(cmd_list)
cmd_list.add_argument('-l', '--list', action='store_true',
help="Print more details.")
cmd_do = subcommands.add_parser('do')
cmd_do.set_defaults(action='do')
_add_common_arguments(cmd_do)
cmd_do.add_argument('action_type', type=str,
help="The action type to run.")
cmd_get = subcommands.add_parser('get')
cmd_get.set_defaults(action='get')
_add_common_arguments(cmd_get)
cmd_get_descriptor = cmd_get.add_mutually_exclusive_group(required=True)
cmd_get_descriptor.add_argument('-p', '--path', type=str, action='append',
help="The path for the descriptor.")
cmd_get_descriptor.add_argument('-c', '--command', type=str,
help="The command for the descriptor.")
args = parser.parse_args()
if not hasattr(args, 'action'):
parser.print_help()
sys.exit(1)
commands = {
'list': list_flights,
'do': do_action,
'get': get_flight,
}
host, port = args.host.split(':')
port = int(port)
scheme = "grpc+tcp"
connection_args = {}
if args.tls:
scheme = "grpc+tls"
if args.tls_roots:
with open(args.tls_roots, "rb") as root_certs:
connection_args["tls_root_certs"] = root_certs.read()
client = pyarrow.flight.FlightClient.connect(f"{scheme}://{host}:{port}",
**connection_args)
while True:
try:
action = pyarrow.flight.Action("healthcheck", b"")
options = pyarrow.flight.FlightCallOptions(timeout=1)
list(client.do_action(action, options=options))
break
except pyarrow.ArrowIOError as e:
    if "Deadline" in str(e):
        print("Server is not ready, waiting...")
    else:
        # re-raise anything other than a health-check timeout so the
        # retry loop cannot spin forever on unrelated errors
        raise
commands[args.action](args, client)
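# Hedged programmatic sketch (added for illustration; the port and dataset path are
# placeholders, and it assumes the server accepts path-based descriptors for do_put).
# It exercises the same FlightClient API the CLI above uses: upload a small table,
# then read it back.
def _example_roundtrip(host='localhost', port=5005):
    client = pyarrow.flight.FlightClient.connect(f"grpc+tcp://{host}:{port}")
    table = pyarrow.table({'x': [1, 2, 3]})
    descriptor = pyarrow.flight.FlightDescriptor.for_path('example.parquet')
    writer, _ = client.do_put(descriptor, table.schema)
    writer.write_table(table)
    writer.close()
    info = client.get_flight_info(descriptor)
    reader = client.do_get(info.endpoints[0].ticket)
    return reader.read_all()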
if __name__ == '__main__':
main()
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/decomposition/tests/test_incremental_pca.py | 43 | 10272 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that partial_fit raises an error when n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of features between calls raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
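# Hedged illustration (added; not part of the scikit-learn test suite): the
# partial_fit pattern exercised above is what makes IncrementalPCA usable when the
# data does not fit in memory -- fit chunk by chunk, then transform as usual.
def _example_chunked_ipca(n_chunks=10, chunk_size=50, n_features=5):
    rng = np.random.RandomState(0)
    ipca = IncrementalPCA(n_components=2)
    for _ in range(n_chunks):
        # in practice each chunk would be streamed from disk or a database
        ipca.partial_fit(rng.randn(chunk_size, n_features))
    return ipca.transform(rng.randn(3, n_features))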
| mit |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/io/data.py | 9 | 45748 | """
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import(
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
warnings.warn("\n"
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str, default: None
the data source ("yahoo", "google", "fred", or "ff")
start : datetime, default: None
left boundary for range (defaults to 1/1/2010)
end : datetime, default: None
right boundary for range (defaults to today)
retry_count : int, default 3
Number of times to retry query request.
pause : numeric, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
elif data_source == "famafrench":
return get_data_famafrench(name)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
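# Hedged usage sketch (illustration only; the tickers are placeholders and the Yahoo!
# quote endpoint queried above has since been retired, so the call may no longer work):
#
#   quotes = get_quote_yahoo(['AAPL', 'MSFT'])
#   quotes[['last', 'change_pct', 'short_ratio']]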
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with urlopen(url) as resp:
lines = resp.read()
except _network_error_classes:
pass
else:
rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
parse_dates=True, na_values='-')[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
#Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
except AttributeError:
#Python 3 string has no decode method.
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
return rs
raise IOError("after %d tries, %s did not "
"return a 200 for url %r" % (retry_count, name, url))
_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
def _get_hist_yahoo(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from yahoo.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
'&d=%s' % (end.month - 1) +
'&e=%s' % end.day +
'&f=%s' % end.year +
'&g=%s' % interval +
'&ignore=.csv')
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
def _get_hist_google(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from google.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
urlencode({"q": sym,
"startdate": start.strftime('%b %d, ' '%Y'),
"enddate": end.strftime('%b %d, %Y'),
"output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
def _adjust_prices(hist_data, price_list=None):
"""
Return modified DataFrame or Panel with adjusted prices based on
'Adj Close' price. Adds 'Adj_Ratio' column.
"""
if price_list is None:
price_list = 'Open', 'High', 'Low', 'Close'
adj_ratio = hist_data['Adj Close'] / hist_data['Close']
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data['Adj_Ratio'] = adj_ratio
del data['Adj Close']
return data
def _calc_return_index(price_df):
"""
Return a returns index from an input price df or series. Initial value
(typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = df.ix[1].notnull() & df.ix[0].isnull()
df.ix[0][mask] = 1
# Check for first stock listings after starting date of index in ret_index
# If True, find first_valid_index and set previous entry to 1.
if (~mask).any():
for sym in mask.index[~mask]:
tstamp = df[sym].first_valid_index()
t_idx = df.index.get_loc(tstamp) - 1
df[sym].ix[t_idx] = 1
return df
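# Hedged worked example (added for clarity): for an input price column
# [10.0, 11.0, 12.1] the cumulative product of (1 + pct_change) is
# [NaN, 1.10, 1.21]; the leading NaN is then replaced with 1 by the logic above,
# so the return index reads [1.0, 1.10, 1.21] -- growth relative to the first price.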
_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index('ticker')
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
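# Hedged usage sketch (illustration only; the Yahoo! components endpoint queried
# above has since been retired, so this is mainly of historical interest):
#
#   dow = get_components_yahoo('^DJI')
#   dow.head()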
def _dl_mult_symbols(symbols, start, end, interval, chunksize, retry_count, pause,
method):
stocks = {}
failed = []
passed = []
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
stocks[sym] = method(sym, start, end, interval, retry_count, pause)
passed.append(sym)
except IOError:
warnings.warn('Failed to read symbol: {0!r}, replacing with '
'NaN.'.format(sym), SymbolWarning)
failed.append(sym)
if len(passed) == 0:
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
try:
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for sym in failed:
stocks[sym] = df_na
return Panel(stocks).swapaxes('items', 'minor')
except AttributeError:
# cannot construct a panel with just 1D nans indicating no data
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
def _get_data_from(symbols, start, end, interval, retry_count, pause, adjust_price,
ret_index, chunksize, source):
src_fn = _source_functions[source]
# If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (compat.string_types, int)):
hist_data = src_fn(symbols, start, end, interval, retry_count, pause)
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
hist_data = _dl_mult_symbols(symbols.index, start, end, interval, chunksize,
retry_count, pause, src_fn)
else:
hist_data = _dl_mult_symbols(symbols, start, end, interval, chunksize,
retry_count, pause, src_fn)
if source.lower() == 'yahoo':
if ret_index:
hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close'])
if adjust_price:
hist_data = _adjust_prices(hist_data)
return hist_data
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25, interval='d'):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame, default: None
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols
start : string, (defaults to '1/1/2010')
Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default: 3
Number of times to retry query request.
pause : numeric, default: 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
adjust_price : bool, default: False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default: False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default: 25
Number of symbols to download consecutively before initiating pause.
interval : string, default: 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly and 'v' for dividend.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
if interval not in ['d', 'w', 'm', 'v']:
raise ValueError("Invalid interval: valid values are 'd', 'w', 'm' and 'v'")
return _get_data_from(symbols, start, end, interval, retry_count, pause,
adjust_price, ret_index, chunksize, 'yahoo')
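# Hedged usage sketch (added for illustration; the tickers and dates are placeholders,
# and the historical Yahoo! endpoint this relies on has since been retired):
def _example_get_data_yahoo():
    panel = get_data_yahoo(['AAPL', 'GOOG'], start='2014-01-01', end='2014-06-30')
    return panel['Adj Close']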
def get_data_google(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Google Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default: 3
Number of times to retry query request.
pause : numeric, default: 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
chunksize : int, default: 25
Number of symbols to download consecutively before initiating pause.
ret_index : bool, default: False
If True, includes a simple return index 'Ret_Index' in hist_data.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
return _get_data_from(symbols, start, end, None, retry_count, pause,
adjust_price, ret_index, chunksize, 'google')
_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
Get data for the given name from the St. Louis FED (FRED).
Date format is datetime
Returns a DataFrame.
If multiple names are passed for "series" then the index of the
DataFrame is the outer join of the indices of each series.
"""
start, end = _sanitize_dates(start, end)
if not is_list_like(name):
names = [name]
else:
names = name
urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
with urlopen(url) as resp:
data = read_csv(resp, index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
try:
return data.truncate(start, end)
except KeyError:
if data.ix[3].name[7:12] == 'Error':
raise IOError("Failed to get the data. Check that {0!r} is "
"a valid FRED series.".format(name))
raise
df = concat([fetch_data(url, n) for url, n in zip(urls, names)],
axis=1, join='outer')
return df
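# Hedged usage sketch (added for illustration; GDP and CPIAUCSL are real FRED series
# identifiers, the date range is arbitrary):
def _example_get_data_fred():
    return get_data_fred(['GDP', 'CPIAUCSL'], start=dt.datetime(2000, 1, 1))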
_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
def get_data_famafrench(name):
# path of zip files
zip_file_path = '{0}/{1}_TXT.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
with tempfile.TemporaryFile() as tmpf:
tmpf.write(raw)
with ZipFile(tmpf, 'r') as zf:
data = zf.open(zf.namelist()[0]).readlines()
line_lengths = np.array(lmap(len, data))
file_edges = np.where(line_lengths == 2)[0]
datasets = {}
edges = zip(file_edges + 1, file_edges[1:])
for i, (left_edge, right_edge) in enumerate(edges):
dataset = [d.split() for d in data[left_edge:right_edge]]
if len(dataset) > 10:
ncol_raw = np.array(lmap(len, dataset))
ncol = np.median(ncol_raw)
header_index = np.where(ncol_raw == ncol - 1)[0][-1]
header = dataset[header_index]
ds_header = dataset[header_index + 1:]
# to ensure the header is unique
header = ['{0} {1}'.format(j, hj) for j, hj in enumerate(header,
start=1)]
index = np.array([d[0] for d in ds_header], dtype=int)
dataset = np.array([d[1:] for d in ds_header], dtype=float)
datasets[i] = DataFrame(dataset, index, columns=header)
return datasets
# Items needed for options class
CUR_MONTH = dt.datetime.now().month
CUR_YEAR = dt.datetime.now().year
CUR_DAY = dt.datetime.now().day
def _two_char(s):
return '{0:0>2}'.format(s)
class Options(object):
"""
***Experimental***
This class fetches call/put data for a given stock/expiry month.
It is instantiated with a string representing the ticker symbol.
The class has the following methods:
get_options_data:(month, year, expiry)
get_call_data:(month, year, expiry)
get_put_data: (month, year, expiry)
get_near_stock_price(opt_frame, above_below)
get_all_data(call, put)
get_forward_data(months, call, put) (deprecated)
Examples
--------
# Instantiate object with ticker
>>> aapl = Options('aapl', 'yahoo')
# Fetch next expiry call data
>>> calls = aapl.get_call_data()
# Can now access aapl.calls instance variable
>>> aapl.calls
# Fetch next expiry put data
>>> puts = aapl.get_put_data()
# Can now access aapl.puts instance variable
>>> aapl.puts
# cut down the call data to be 3 below and 3 above the stock price.
>>> cut_calls = aapl.get_near_stock_price(call=True, above_below=3)
# Fetch call and put data with expiry from now to 8 months out
>>> forward_data = aapl.get_forward_data(8, call=True, put=True)
# Fetch all call and put data
>>> all_data = aapl.get_all_data()
"""
_TABLE_LOC = {'calls': 1, 'puts': 2}
_OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
_FINANCE_BASE_URL = 'http://finance.yahoo.com'
def __init__(self, symbol, data_source=None):
""" Instantiates options_data with a ticker saved as symbol """
self.symbol = symbol.upper()
if data_source is None:
warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
" data_source) instead", FutureWarning, stacklevel=2)
data_source = "yahoo"
if data_source != "yahoo":
raise NotImplementedError("currently only yahoo supported")
def get_options_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call/put data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_options() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
Also note that aapl.calls and aapl.puts will always be the calls
and puts for the next expiry. If the user calls this method with
a different expiry, the ivar will be named callsYYMMDD or putsYYMMDD,
where YY, MM and DD are, respectively, two digit representations of
the year, month and day for the expiry of the options.
"""
return concat([f(month, year, expiry)
for f in (self.get_put_data,
self.get_call_data)]).sortlevel()
def _get_option_frames_from_yahoo(self, expiry):
url = self._yahoo_url_from_expiry(expiry)
option_frames = self._option_frames_from_url(url)
frame_name = '_frames' + self._expiry_to_string(expiry)
setattr(self, frame_name, option_frames)
return option_frames
@staticmethod
def _expiry_to_string(expiry):
m1 = _two_char(expiry.month)
d1 = _two_char(expiry.day)
return str(expiry.year)[-2:] + m1 + d1
def _yahoo_url_from_expiry(self, expiry):
try:
expiry_links = self._expiry_links
except AttributeError:
_, expiry_links = self._get_expiry_dates_and_links()
return self._FINANCE_BASE_URL + expiry_links[expiry]
def _option_frames_from_url(self, url):
frames = read_html(url)
nframes = len(frames)
frames_req = max(self._TABLE_LOC.values())
if nframes < frames_req:
raise RemoteDataError("%s options tables found (%s expected)" % (nframes, frames_req))
if not hasattr(self, 'underlying_price'):
try:
self.underlying_price, self.quote_time = self._underlying_price_and_time_from_url(url)
except IndexError:
self.underlying_price, self.quote_time = np.nan, np.nan
calls = frames[self._TABLE_LOC['calls']]
puts = frames[self._TABLE_LOC['puts']]
calls = self._process_data(calls, 'call')
puts = self._process_data(puts, 'put')
return {'calls': calls, 'puts': puts}
def _underlying_price_and_time_from_url(self, url):
root = self._parse_url(url)
underlying_price = self._underlying_price_from_root(root)
quote_time = self._quote_time_from_root(root)
return underlying_price, quote_time
@staticmethod
def _underlying_price_from_root(root):
underlying_price = root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\
.getchildren()[0].text
underlying_price = underlying_price.replace(',', '') #GH11
try:
underlying_price = float(underlying_price)
except ValueError:
underlying_price = np.nan
return underlying_price
@staticmethod
def _quote_time_from_root(root):
#Gets the time of the quote, note this is actually the time of the underlying price.
try:
quote_time_text = root.xpath('.//*[@class="time_rtq Fz-m"]')[0].getchildren()[1].getchildren()[0].text
##TODO: Enable timezone matching when strptime can match EST with %Z
quote_time_text = quote_time_text.split(' ')[0]
quote_time = dt.datetime.strptime(quote_time_text, "%I:%M%p")
quote_time = quote_time.replace(year=CUR_YEAR, month=CUR_MONTH, day=CUR_DAY)
except ValueError:
quote_time = np.nan
return quote_time
def _get_option_data(self, expiry, name):
frame_name = '_frames' + self._expiry_to_string(expiry)
try:
frames = getattr(self, frame_name)
except AttributeError:
frames = self._get_option_frames_from_yahoo(expiry)
option_data = frames[name]
if expiry != self.expiry_dates[0]:
name += self._expiry_to_string(expiry)
setattr(self, name, option_data)
return option_data
def get_call_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
call_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_call_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
Also note that aapl.calls will always be the calls for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named callsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, call=True, put=False)
def get_put_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets put data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
put_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
puts. See the following example:
>>> aapl = Options('aapl') # Create object
>>> aapl.puts # will give an AttributeError
>>> aapl.get_put_data() # Get data and set ivars
>>> aapl.puts # Doesn't throw AttributeError
Also note that aapl.puts will always be the puts for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named putsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, put=True, call=False)
def get_near_stock_price(self, above_below=2, call=True, put=False,
month=None, year=None, expiry=None):
"""
***Experimental***
Returns a data frame of options that are near the current stock price.
Parameters
----------
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken
call : bool, default: True
Tells the function whether or not it should be using calls
put : bool, default: False
Tells the function whether or not it should be using puts
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
chopped: DataFrame
The resultant DataFrame, chopped down to the 2 * above_below + 1 rows
nearest the underlying price. If there isn't data as far out as the user
has asked for, only the rows that are available are returned.
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
expiry = self._try_parse_dates(year, month, expiry)
data = self._get_data_in_date_range(expiry, call=call, put=put)
return self.chop_data(data, above_below, self.underlying_price)
def chop_data(self, df, above_below=2, underlying_price=None):
"""Returns a data frame only options that are near the current stock price."""
if not underlying_price:
try:
underlying_price = self.underlying_price
except AttributeError:
underlying_price = np.nan
max_strike = max(df.index.get_level_values('Strike'))
min_strike = min(df.index.get_level_values('Strike'))
if not np.isnan(underlying_price) and min_strike < underlying_price < max_strike:
start_index = np.where(df.index.get_level_values('Strike')
> underlying_price)[0][0]
get_range = slice(start_index - above_below,
start_index + above_below + 1)
df = df[get_range].dropna(how='all')
return df
def _try_parse_dates(self, year, month, expiry):
"""
Validates dates provided by user. Ensures the user either provided both a month and a year or an expiry.
Parameters
----------
year : int
Calendar year
month : int
Calendar month
expiry : date-like or convertible, (preferred)
Expiry date
Returns
-------
list of expiry dates (datetime.date)
"""
#Raise if the user supplied only one of month/year and did not provide an expiry:
if ((month is not None and year is None) or (month is None and year is not None)) and expiry is None:
msg = "You must specify either (`year` and `month`) or `expiry` " \
"or none of these options for the next expiry."
raise ValueError(msg)
if expiry is not None:
if hasattr(expiry, '__iter__'):
expiry = [self._validate_expiry(exp) for exp in expiry]
else:
expiry = [self._validate_expiry(expiry)]
if len(expiry) == 0:
raise ValueError('No expiries available for given input.')
elif year is None and month is None:
#No arguments passed, provide next expiry
year = CUR_YEAR
month = CUR_MONTH
expiry = dt.date(year, month, 1)
expiry = [self._validate_expiry(expiry)]
else:
#Year and month passed, provide all expiries in that month
expiry = [expiry for expiry in self.expiry_dates if expiry.year == year and expiry.month == month]
if len(expiry) == 0:
raise ValueError('No expiries available in %s-%s' % (year, month))
return expiry
def _validate_expiry(self, expiry):
"""Ensures that an expiry date has data available on Yahoo
If the expiry date does not have options that expire on that day, return next expiry"""
expiry_dates = self.expiry_dates
expiry = to_datetime(expiry)
if hasattr(expiry, 'date'):
expiry = expiry.date()
if expiry in expiry_dates:
return expiry
else:
index = DatetimeIndex(expiry_dates).sort_values()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
above_below=2):
"""
***Experimental***
Gets either call, put, or both data for months starting in the current
month and going out in the future a specified amount of time.
Parameters
----------
months : number, int
How many months to go out in the collection of the data. This is
inclusive.
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=False)
Whether or not to collect data for put options.
near : bool, optional (default=False)
Whether this function should get only the data near the
current stock price. Uses Options.get_near_stock_price
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken if the near option is set to True
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
warnings.warn("get_forward_data() is deprecated", FutureWarning,
stacklevel=2)
end_date = dt.date.today() + MonthEnd(months)
dates = (date for date in self.expiry_dates if date <= end_date.date())
data = self._get_data_in_date_range(dates, call=call, put=put)
if near:
data = self.chop_data(data, above_below=above_below)
return data
def get_all_data(self, call=True, put=True):
"""
***Experimental***
Gets either call, put, or both data for all available months starting
in the current month.
Parameters
----------
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=True)
Whether or not to collect data for put options.
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
try:
expiry_dates = self.expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return self._get_data_in_date_range(dates=expiry_dates, call=call, put=put)
def _get_data_in_date_range(self, dates, call=True, put=True):
to_ret = Series({'calls': call, 'puts': put})
to_ret = to_ret[to_ret].index
data = []
for name in to_ret:
for expiry_date in dates:
nam = name + self._expiry_to_string(expiry_date)
try: # Try to access on the instance
frame = getattr(self, nam)
except AttributeError:
frame = self._get_option_data(expiry=expiry_date, name=name)
data.append(frame)
return concat(data).sortlevel()
@property
def expiry_dates(self):
"""
Returns a list of available expiry dates
"""
try:
expiry_dates = self._expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return expiry_dates
def _get_expiry_dates_and_links(self):
"""
Gets available expiry dates.
Returns
-------
Tuple of:
List of datetime.date objects
Dict of datetime.date objects as keys and corresponding links
"""
url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
root = self._parse_url(url)
try:
links = root.xpath('//*[@id="options_menu"]/form/select/option')
except IndexError:
raise RemoteDataError('Expiry dates not available')
expiry_dates = [dt.datetime.strptime(element.text, "%B %d, %Y").date() for element in links]
links = [element.attrib['data-selectbox-link'] for element in links]
if len(expiry_dates) == 0:
raise RemoteDataError('Data not available')
expiry_links = dict(zip(expiry_dates, links))
self._expiry_links = expiry_links
self._expiry_dates = expiry_dates
return expiry_dates, expiry_links
def _parse_url(self, url):
"""
Downloads and parses a URL, returns xml root.
"""
try:
from lxml.html import parse
except ImportError:
raise ImportError("Please install lxml if you want to use the "
"{0!r} class".format(self.__class__.__name__))
try:
doc = parse(url)
except _network_error_classes:
raise RemoteDataError("Unable to parse URL "
"{0!r}".format(url))
else:
root = doc.getroot()
if root is None:
raise RemoteDataError("Parsed URL {0!r} has no root"
"element".format(url))
return root
def _process_data(self, frame, type):
"""
Adds columns for Expiry, IsNonstandard (i.e. the deliverable is not 100
shares), Underlying, Underlying_Price, Quote_Time and Type, then sets a
(Strike, Expiry, Type, Symbol) MultiIndex.
"""
frame.columns = ['Strike', 'Symbol', 'Last', 'Bid', 'Ask', 'Chg', 'PctChg', 'Vol', 'Open_Int', 'IV']
frame["Rootexp"] = frame.Symbol.str[0:-9]
frame["Root"] = frame.Rootexp.str[0:-6]
frame["Expiry"] = to_datetime(frame.Rootexp.str[-6:])
#Removes dashes in equity ticker to map to option ticker.
#Ex: BRK-B to BRKB140517C00100000
frame["IsNonstandard"] = frame['Root'] != self.symbol.replace('-', '')
del frame["Rootexp"]
frame["Underlying"] = self.symbol
try:
frame['Underlying_Price'] = self.underlying_price
frame["Quote_Time"] = self.quote_time
except AttributeError:
frame['Underlying_Price'] = np.nan
frame["Quote_Time"] = np.nan
frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True)
frame['Type'] = type
frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True)
return frame
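# Hedged usage sketch (added for illustration; the ticker is a placeholder and the
# Yahoo! options pages this class scrapes have since changed, so it may no longer work):
def _example_options_usage():
    aapl = Options('aapl', 'yahoo')
    all_data = aapl.get_all_data(call=True, put=True)
    near_calls = aapl.get_near_stock_price(above_below=2, call=True, put=False)
    return all_data, near_calls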
| mit |