repo_name | path | copies | size | content | license
---|---|---|---|---|---|
nixingyang/Kaggle-Competitions | Allstate Claims Severity/solution_XGBoost.py | 1 | 7068 | import os
import glob
import numpy as np
import pandas as pd
from xgboost.sklearn import XGBRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import ShuffleSplit
# Data Set
DATASET_FOLDER_PATH = "./"
INPUT_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "input")
TRAIN_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "train.csv")
TEST_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "test.csv")
SUBMISSION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "submission")
ID_COLUMN_NAME = "id"
LABEL_COLUMN_NAME = "loss"
# Training Procedure
CROSS_VALIDATION_NUM = 10
N_ESTIMATORS = 1000000
EARLY_STOPPING_ROUNDS = 200
def load_data():
# Read file content
train_file_content = pd.read_csv(TRAIN_FILE_PATH)
test_file_content = pd.read_csv(TEST_FILE_PATH)
combined_file_content = pd.concat([train_file_content, test_file_content])
del (train_file_content, test_file_content)
train_data_mask = combined_file_content[LABEL_COLUMN_NAME].notnull(
).as_matrix()
test_data_mask = combined_file_content[LABEL_COLUMN_NAME].isnull(
).as_matrix()
    # Separate the feature columns
feature_column_list = list(
combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME], axis=1))
categorical_feature_column_list = [
feature_column for feature_column in feature_column_list
if feature_column.startswith("cat")
]
# Process categorical features: remove obsolete unique values and factorize the values
for categorical_feature_column in categorical_feature_column_list:
unique_train_data_array = combined_file_content[
categorical_feature_column][train_data_mask].unique()
unique_test_data_array = combined_file_content[
categorical_feature_column][test_data_mask].unique()
unique_data_array_to_discard = np.setdiff1d(
np.union1d(unique_train_data_array, unique_test_data_array),
np.intersect1d(unique_train_data_array, unique_test_data_array))
if len(unique_data_array_to_discard) > 0:
discard_function = lambda input_value: np.nan if input_value in unique_data_array_to_discard else input_value
combined_file_content[
categorical_feature_column] = combined_file_content[
categorical_feature_column].apply(discard_function)
combined_file_content[categorical_feature_column], _ = pd.factorize(
combined_file_content[categorical_feature_column])
combined_file_content[categorical_feature_column] -= np.min(
combined_file_content[categorical_feature_column])
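    # Illustrative note (not part of the original solution): for a column whose train
    # levels are {"A", "B"} and test levels are {"B", "C"}, the symmetric difference
    # {"A", "C"} is replaced by NaN above; pd.factorize() then maps NaN to -1 and the
    # shared level "B" to 0, and subtracting the minimum shifts all codes to be >= 0.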
# Separate the training and testing data set
X_array = combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME],
axis=1).as_matrix()
Y_array = combined_file_content[LABEL_COLUMN_NAME].as_matrix()
ID_array = combined_file_content[ID_COLUMN_NAME].as_matrix()
X_train = X_array[train_data_mask]
Y_train = Y_array[train_data_mask]
X_test = X_array[test_data_mask]
ID_test = ID_array[test_data_mask]
submission_file_content = pd.DataFrame({
ID_COLUMN_NAME: ID_test,
LABEL_COLUMN_NAME: np.zeros(ID_test.shape[0])
})
return X_train, Y_train, X_test, submission_file_content
def ensemble_predictions():
def _ensemble_predictions(ensemble_func, ensemble_submission_file_name):
ensemble_proba = ensemble_func(proba_array, axis=0)
ensemble_submission_file_content.loc[:, proba_columns] = ensemble_proba
ensemble_submission_file_content.to_csv(os.path.join(
SUBMISSION_FOLDER_PATH, ensemble_submission_file_name),
index=False)
# Read predictions
submission_file_path_list = glob.glob(
os.path.join(SUBMISSION_FOLDER_PATH, "submission_*.csv"))
submission_file_content_list = [
pd.read_csv(submission_file_path)
for submission_file_path in submission_file_path_list
]
ensemble_submission_file_content = submission_file_content_list[0]
# Concatenate predictions
proba_columns = list(
set(ensemble_submission_file_content) - {ID_COLUMN_NAME})
proba_list = [
np.expand_dims(submission_file_content.as_matrix(proba_columns), axis=0)
for submission_file_content in submission_file_content_list
]
proba_array = np.vstack(proba_list)
# Ensemble predictions
for ensemble_func, ensemble_submission_file_name in \
zip([np.max, np.min, np.mean, np.median], ["max.csv", "min.csv", "mean.csv", "median.csv"]):
_ensemble_predictions(ensemble_func, ensemble_submission_file_name)
def run():
# Load data set
X_train, Y_train, X_test, submission_file_content = load_data()
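    # Train on log(loss + 200); predictions are mapped back with np.exp(...) - 200
    # before the submission is written (see the testing procedure below).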
Y_train = np.log(Y_train + 200)
# Cross validation
cross_validation_iterator = ShuffleSplit(n_splits=CROSS_VALIDATION_NUM,
test_size=0.1,
random_state=0)
for cross_validation_index, (train_index, valid_index) in enumerate(
cross_validation_iterator.split(X_train), start=1):
print("Working on {}/{} ...".format(cross_validation_index,
CROSS_VALIDATION_NUM))
submission_file_path = os.path.join(
SUBMISSION_FOLDER_PATH,
"submission_{}.csv".format(cross_validation_index))
if os.path.isfile(submission_file_path):
continue
model = XGBRegressor(learning_rate=0.01,
max_depth=12,
n_estimators=N_ESTIMATORS,
silent=False,
objective="reg:linear",
gamma=1,
min_child_weight=1,
subsample=0.8,
colsample_bytree=0.5,
reg_alpha=1,
seed=cross_validation_index,
nthread=-1)
model.fit(X_train[train_index],
Y_train[train_index],
eval_set=[(X_train[valid_index], Y_train[valid_index])],
eval_metric=lambda y_predicted, y_true:
("actual_mae",
mean_absolute_error(np.exp(y_true.get_label()),
np.exp(y_predicted))),
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
verbose=True)
# Perform the testing procedure
Y_test = model.predict(X_test)
# Save submission to disk
if not os.path.isdir(SUBMISSION_FOLDER_PATH):
os.makedirs(SUBMISSION_FOLDER_PATH)
submission_file_content[LABEL_COLUMN_NAME] = np.exp(Y_test) - 200
submission_file_content.to_csv(submission_file_path, index=False)
# Perform ensembling
ensemble_predictions()
print("All done!")
if __name__ == "__main__":
run()
| mit |
lpryszcz/bin | bam2ploidy.py | 1 | 13625 | #!/usr/bin/env python
desc="""Report chromosome/contig ploidy
TBA:
- some function fitting ploidy with int
- same for alt alleles
"""
epilog="""Author:
[email protected]
Warsaw, 13/12/2017
"""
import os, sys, pysam, resource
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy import stats, signal
from itertools import izip
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
alphabet = "ACGT" # i=insertion d=deletion
base2index = {b: i for i, b in enumerate(alphabet)}
for i, b in enumerate(alphabet.lower()):
base2index[b] = i
# CIGAR operations
"""Op BAM Description +1Q +1R
M 0 alignment match (can be a sequence match or mismatch) yes yes
I 1 insertion to the reference yes no
D 2 deletion from the reference no yes
N 3 skipped region from the reference no yes
S 4 soft clipping (clipped sequences present in SEQ) yes no
H 5 hard clipping (clipped sequences NOT present in SEQ) no no
P 6 padding (silent deletion from padded reference) no no
= 7 sequence match yes yes
X 8 sequence mismatch yes yes
"""
def _match(refi, readi, bases): return refi+bases, readi+bases, True
def _insertion(refi, readi, bases): return refi, readi+bases, False
def _deletion(refi, readi, bases): return refi+bases, readi, False
def _skip(refi, readi, bases): return refi, readi, False
code2function = {0: _match, 7: _match, 8: _match, 1: _insertion, 6: _insertion,
2: _deletion, 3: _deletion, 4: _insertion, 5: _skip}
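# Illustrative sketch (not part of the original script): how the dispatch table above
# advances the (reference, read) indices for a toy CIGAR of 5M 2I 3M starting at
# reference position 100; only steps flagged True (aligned blocks) contribute base
# calls in store_blocks() below.
def _example_cigar_walk(cigar=((0, 5), (1, 2), (0, 3)), refi=100, readi=0):
    steps = []
    for code, bases in cigar:
        refi, readi, is_aligned_block = code2function[code](refi, readi, bases)
        steps.append((refi, readi, is_aligned_block))
    return steps  # [(105, 5, True), (105, 7, False), (108, 10, True)]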
def store_blocks(a, start, end, baseq, basesize, calls):
"""Return tuple of aligned position of query and reference."""
readi, refi = 0, a.pos
for ci, (code, bases) in enumerate(a.cigar):
prefi, preadi = refi, readi
refi, readi, data = code2function[code](refi, readi, bases)
# skip if current before start
if refi<=start:
continue
# typical alignment part
if data:
if prefi<start:
bases -= start-prefi
preadi += start-prefi
prefi = start
if refi>end:
bases -= refi-end
if bases<1:
break
for ii, (b, q) in enumerate(zip(a.seq[preadi:preadi+bases], a.query_qualities[preadi:preadi+bases])):
if q>=baseq and b in base2index:
calls[(prefi-start)*basesize+ii*basesize+base2index[b]] += 1
def is_qcfail(a, mapq=15):
"""Return True if alignment record fails quality checks"""
if a.mapq<mapq or a.flag&3840:
return True
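    # Note: the 3840 (0xF00) mask filters secondary (256), QC-fail (512),
    # duplicate (1024) and supplementary (2048) alignments.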
def get_freqhist():
"""Return freqbins and freqhist"""
freqbins = np.arange(.0, 1.01, 0.01)
freqhist = np.zeros(len(freqbins), dtype='uint32')
return freqbins, freqhist
def bam2cov_freq(bam, region, minDepth, mapq=15, baseq=20, minreads=3):
"""Return 2 arrays of per position coverage and max base frequency histogram"""
ref, start, end = region
sam = pysam.AlignmentFile(bam)
# ACGT x2 for each strand
strandsNo = 1
basesize = strandsNo*len(alphabet)
n = basesize * (end-start+1)
calls = np.zeros(n, dtype="int64") # for compatibility with list
# freqhist
freqbins, freqhist = get_freqhist()
# stop if ref not in sam file
if ref not in sam.references:
if ref.startswith('chr') and ref[3:] in sam.references:
ref = ref[3:]
elif 'chr%s'%ref in sam.references:
ref = 'chr%s'%ref
else:
return np.array([], dtype='uint16'), freqhist
# process alignments
pa = None
for a in sam.fetch(ref, start, end):
if is_qcfail(a, mapq):
continue
pa = a
store_blocks(a, start, end, baseq, basesize, calls)
"""for refi, block in get_blocks(a, start, end, baseq, basesize):
s, e = basesize*(refi-start), basesize*(refi-start)+len(block)
calls[s:e] += block"""
# reshape
calls = calls.reshape((end-start+1, len(alphabet)))
# skip calls by less than 3 reads
calls[calls<minreads] = 0
# get coverage
cov = np.array(calls.sum(axis=1), dtype='uint16')
# get freq
freqs = 100*calls[cov>=minDepth] / cov[cov>=minDepth, None]
for i, c in zip(*np.unique(freqs, return_counts=1)):
if i>=len(freqhist): continue
freqhist[i] = c
return cov, freqhist
def worker(args):
# ignore all warnings
np.seterr(all='ignore')
bam, region, minDepth, mapq, bcq, minreads = args
return bam2cov_freq(bam, region, minDepth, mapq, bcq, minreads)
def bam2regions(bam, chrs=[], maxfrac=0.05, step=100000, verbose=0):
"""Return chromosome windows"""
regions, refs, lens = [], [], []
sam = pysam.Samfile(bam)
references, lengths = sam.references, sam.lengths
for ref, length in izip(references, lengths):
# skip if chr not selected or too small
if chrs and ref not in chrs or length<maxfrac*max(lengths):
if verbose:
sys.stderr.write(" %s skipped\n"%ref)
continue
refs.append(ref)
lens.append(length)
for s in xrange(1, length-step, step):
regions.append((ref, s, s+step-1))
# last window is shorter
regions.append((ref, s+step, length))
return regions, refs, lens
def get_stats(covs, freqs, chrlen, minAltFreq=10, q=2):
"""Return coverage median, mean and stdev"""
cov = np.concatenate(covs, axis=0)
if cov.sum()<100: return 0, 0, 0, []
# get rid of left / right 5 percentile
mincov, maxcov = stats.scoreatpercentile(cov, q), stats.scoreatpercentile(cov, 100-q)
cov = cov[np.all(np.array([cov<maxcov, cov>mincov]), axis=0)]
if cov.sum()<100: return 0, 0, 0, []
return np.median(cov), cov.mean(), cov.std(), freqs
def bam2ploidy(bam, minDepth=10, minAltFreq=10, mapq=3, bcq=20, threads=4, chrs=[], minfrac=0.05, minreads=3, verbose=1):
"""Get alternative base coverage and frequency for every bam file"""
# exit if processed
outfn = "%s.ploidy.tsv"%bam
if os.path.isfile(outfn) and open(outfn).readline():
if verbose: sys.stderr.write(" Outfile exists or not empty: %s\n"%outfn)
return outfn
# make sure indexed
logger(" %s"%bam)
if not os.path.isfile(bam+".bai"):
logger(" Indexing...")
cmd = "samtools index %s"%bam
if verbose:
sys.stderr.write(" %s\n"%cmd)
os.system(cmd)
# get regions
regions, refs, lens = bam2regions(bam, chrs, minfrac)#; print regions[:1000]
chr2len = {r: l for r, l in zip(refs, lens)}
# this is useful for debugging
i = 0
if threads<2:
import itertools
p = itertools
else:
p = Pool(threads)
parser = p.imap(worker, ((bam, r, minDepth, mapq, bcq, minreads) for r in regions))
# process regions
ref2stats = {}
pref, covs = "", []
freqbins, freqhist = get_freqhist()
for i, ((ref, start, end), (_cov, _freqhist)) in enumerate(izip(regions, parser), 1):
sys.stderr.write(" %s / %s %s:%s-%s \r"%(i, len(regions), ref, start, end))
if ref!=pref:
if covs:
ref2stats[pref] = get_stats(covs, freqhist, chr2len[pref], minAltFreq)
# reset
pref, covs = ref, []
freqbins, freqhist = get_freqhist()
freqhist += _freqhist
covs.append(_cov)
# process last output
if covs:
ref2stats[pref] = get_stats(covs, freqhist, chr2len[pref], minAltFreq)
# report
oline = "%s\t%s\t%.2f\t%.2f\t%s\n"
with open(outfn, "w") as out:
out.write("#ref\tlen\tcov\tstdev\tfreq_histogram\n")
for r, l in izip(refs, lens):
out.write(oline%(r, l, ref2stats[r][1], ref2stats[r][2], ",".join(map(str, ref2stats[r][-1]))))
return outfn
def logger(info, add_timestamp=1, add_memory=1, out=sys.stderr):
"""Report nicely formatted stream to stderr"""
memory = timestamp = ""
if add_timestamp:
timestamp = "[%s] "%datetime.ctime(datetime.now())
if add_memory:
selfmem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.
childrenmem = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss / 1024.
memory = " [memory: %7.1f Mb]"%(childrenmem + selfmem, ) #; %7.1f Mb self
out.write("%s%s%s\n"%(timestamp, info, memory))
def plot(outbase, fnames, chrs, chr2data, minAltFreq=10, ext="png"):
"""Save freq histograms"""
freqbins, freqs = get_freqhist()
outfn = "%s.%s"%(outbase, ext)
logger("Saving figure %s..."%outfn)
fig, axes = plt.subplots(figsize=(9*len(chrs)+1, 4*len(fnames)+1), nrows=len(fnames), ncols=len(chrs), sharex=True)
fig.suptitle("Histograms of SNP frequencies")
for j, (r, data) in enumerate(zip(chrs, chr2data)):
for i, (fn, (freqs, ploidy, modes)) in enumerate(zip(fnames, data)):
if not sum(freqs):
freqs = np.zeros(freqbins.shape)
ax = axes[i][j]
ax.bar(freqbins[minAltFreq:-minAltFreq], freqs[minAltFreq:-minAltFreq], width=0.01)
ax.set_title("%s %s\nploidy:%s modes:%s"%(os.path.basename(fn)[:-15], r, ploidy, modes))
if not j: ax.set_ylabel("counts")
ax.set_xlim(0, 1)
ax.set_xlabel("Allele frequency")
fig.savefig(outfn, dpi=100)
def report(outbase, fnames, minAltFreq=10, verbose=0, order=5):
"""Report final table with ploidy and freq modes"""
olines = [["# chr", "len"] + ["%s\tmodes"%fn for fn in fnames]]
chrs, lens = [], []
for fn in fnames:
ldata = [l[:-1].split('\t') for l in open(fn) if not l.startswith('#')]
if len(olines)<2:
olines += [ld[:2] for ld in ldata]
chrs = [ld[0] for ld in ldata]
lens = map(int, [ld[1] for ld in ldata])
chr2data = [[] for i in range(len(chrs))]
# get min cov ==> ploidy 1
covstats = np.array(map(float, [ld[2] for ld in ldata]))
mincov = min(covstats[covstats>covstats.mean()*0.1])
ploidy = covstats / mincov
# process all
for i, (chrlen, ld, p) in enumerate(zip(lens, ldata, ploidy), 1):
# recalculate modes
freqs = np.array(map(int, ld[-1].split(','))) if ld[-1] else np.array([])
if freqs[minAltFreq:-minAltFreq].sum()<chrlen*.001:
modes = []
else:
modes = signal.argrelmax(freqs, order=order)[0] #freqs[:100:2]+freqs[1::2]
# report
ploidy, modes = "%.2f"%float(p), ",".join(map(str, modes))
olines[i] += [ploidy, modes]
chr2data[i-1].append((freqs, ploidy, modes))
# report & plot
outfn = outbase+".tsv"
logger("Reporting ploidy to %s"%outfn)
with open(outfn, "w") as out:
out.write("\n".join("\t".join(ol) for ol in olines)+"\n")
plot(outbase, fnames, chrs, chr2data, minAltFreq)
def main():
import argparse
usage = "%(prog)s [options]"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-v", "--verbose", action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.20a')
parser.add_argument("-i", "--bams", nargs="+", help="input BAM file(s)")
parser.add_argument("-o", "--outbase", default="bam2ploidy", help="output basename [%(default)s]")
parser.add_argument("-q", "--mapq", default=10, type=int, help="mapping quality [%(default)s]")
parser.add_argument("-Q", "--bcq", default=20, type=int, help="basecall quality [%(default)s]")
parser.add_argument("-t", "--threads", default=4, type=int, help="number of cores to use [%(default)s]")
parser.add_argument("-c", "--chrs", nargs="*", default=[], help="analyse selected chromosomes [all]")
parser.add_argument("--minDepth", default=10, type=int, help="minimal depth of coverage for genotyping [%(default)s]")
parser.add_argument("--minAltFreq", default=10, type=int, help="min frequency for DNA base in sample [%(default)s]")
parser.add_argument("--minfrac", default=0.05, type=float, help="min length of chr/contig as fraction of the longest chr [%(default)s]")
parser.add_argument("--minreads", default=3, type=int, help="min number of reads to call alt allele [%(default)s]")
# print help if no parameters
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
# check if all files exists
for fn in o.bams:
if not os.path.isfile(fn):
sys.stderr.write("No such file: %s\n"%fn)
sys.exit(1)
# create outdirs
if os.path.dirname(o.outbase) and not os.path.isdir(os.path.dirname(o.outbase)):
os.makedirs(os.path.dirname(o.outbase))
logger("Processing %s BAM file(s)..."%len(o.bams))
fnames = []
for bam in o.bams:
outfn = bam2ploidy(bam, o.minDepth, o.minAltFreq, o.mapq, o.bcq, o.threads, o.chrs, o.minfrac, o.minreads, o.verbose)
fnames.append(outfn)
report(o.outbase, fnames, o.minAltFreq, o.verbose)
logger("Finished!")
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n" % dt)
| gpl-3.0 |
twosigma/ngrid | test/sample0.py | 1 | 1624 | from __future__ import division
from collections import OrderedDict
from datetime import datetime, date
from pytz import UTC, timezone
import numpy as np
import pandas as pd
import cPickle as pickle
import random
LETTERS = "abcdefghijklmnopqrstuvwxyz"
def random_word():
length = random.randint(3, 12)
return "".join( random.choice(LETTERS) for _ in range(length) )
N = 65563
cols = OrderedDict()
cols.update(
("flag{}".format(i), np.random.randint(0, 2, (N, )).astype(bool))
for i in range(4)
)
cols.update(
("normal{}".format(i), np.random.standard_normal((N, )))
for i in range(4)
)
cols.update(
("exp{}".format(i), np.exp(np.random.random((N, )) * 20))
for i in range(2)
)
cols.update(
("expexp{}".format(i), np.exp(np.exp(np.random.random((N, )) * 5)))
for i in range(2)
)
cols.update(
("count{}".format(i), np.exp(np.random.random((N, )) * 20).astype("int32"))
for i in range(8)
)
cols.update(
("word{}".format(i), [ random_word() for _ in range(N) ])
for i in range(4)
)
cols.update(
("sig{}".format(i), np.random.randint(0, 10 ** i, (N, )) / 10 ** (i - 1))
for i in range(2, 5)
)
cols.update(
("negsig{}".format(i), np.random.randint(-(10 ** i), 10 ** i, (N, )) / 10 ** (i - 1))
for i in range(2, 5)
)
cols.update(
("datetime{}".format(i), (np.random.uniform(631152000e9, 1577836800e9, (N, ))).astype("datetime64[ns]"))
for i in range(4)
)
df = pd.DataFrame(cols)
with open("sample0.pickle", "wb") as file:
pickle.dump(df, file, pickle.HIGHEST_PROTOCOL)
df.to_csv("sample0.csv")
| bsd-3-clause |
jchodera/MSMs | attic/src/code/hmsm/plot_assign.py | 3 | 1331 | import numpy as np
import pandas as pd
import mdtraj as md
from mixtape.utils import iterobjects, assign
import mixtape.ghmm, mixtape.featurizer
import sklearn.hmm
import os
name = "tica"
json_filename = "./%s.jsonlines" % name
feature_filename = "./%s.pkl" % name
featurizer = mixtape.featurizer.load(feature_filename)
models = list(iterobjects(json_filename))
df = pd.DataFrame(models)
x = df.ix[0]
T = np.array(x["transmat"])
p = np.array(x["populations"])
n_states = len(p)
model = mixtape.ghmm.GaussianFusionHMM(n_states, featurizer.n_features)
model.means_ = x["means"]
model.vars_ = x["vars"]
model.transmat_ = x["transmat"]
model.populations_ = x["populations"]
means = model.means_
covars = model.vars_
#n_traj = 348
#n_traj = 131
n_traj = 1
all_assignments = []
all_probs = []
for i in range(n_traj):
print(i)
traj = md.load("./Trajectories/trj%d.h5" % i)
ass, probs = assign(featurizer, traj, model)
    all_assignments.extend(ass)
all_probs.extend(probs)
all_assignments = np.array(all_assignments)
all_probs = np.array(all_probs)
traj = md.load("./Trajectories/trj%d.h5" % 50)
traj.superpose(trj0, atom_indices=atom_indices)
diff2 = (traj.xyz[:, atom_indices] - trj0.xyz[0, atom_indices]) ** 2
data = np.sqrt(np.sum(diff2, axis=2))
ass = hmm.predict(data)
rmsd = md.rmsd(traj, trj0)
| gpl-2.0 |
simplegeo/shapely | examples/intersect.py | 24 | 2626 | # intersect.py
#
# Demonstrate how Shapely can be used to analyze and plot the intersection of
# a trajectory and regions in space.
from functools import partial
import random
import pylab
from shapely.geometry import LineString, Point
from shapely.ops import cascaded_union
# Build patches as in dissolved.py
r = partial(random.uniform, -20.0, 20.0)
points = [Point(r(), r()) for i in range(100)]
spots = [p.buffer(2.5) for p in points]
patches = cascaded_union(spots)
# Represent the following geolocation parameters
#
# initial position: -25, -25
# heading: 45.0
# speed: 50*sqrt(2)
#
# as a line
vector = LineString(((-25.0, -25.0), (25.0, 25.0)))
# Find intercepted and missed patches. List the former so we can count them
# later
intercepts = [patch for patch in patches.geoms if vector.intersects(patch)]
misses = (patch for patch in patches.geoms if not vector.intersects(patch))
# Plot the intersection
intersection = vector.intersection(patches)
assert intersection.geom_type in ['MultiLineString']
if __name__ == "__main__":
# Illustrate the results using matplotlib's pylab interface
pylab.figure(num=None, figsize=(4, 4), dpi=180)
# Plot the misses
for spot in misses:
x, y = spot.exterior.xy
pylab.fill(x, y, color='#cccccc', aa=True)
pylab.plot(x, y, color='#999999', aa=True, lw=1.0)
# Do the same for the holes of the patch
for hole in spot.interiors:
x, y = hole.xy
pylab.fill(x, y, color='#ffffff', aa=True)
pylab.plot(x, y, color='#999999', aa=True, lw=1.0)
# Plot the intercepts
for spot in intercepts:
x, y = spot.exterior.xy
pylab.fill(x, y, color='red', alpha=0.25, aa=True)
pylab.plot(x, y, color='red', alpha=0.5, aa=True, lw=1.0)
# Do the same for the holes of the patch
for hole in spot.interiors:
x, y = hole.xy
pylab.fill(x, y, color='#ffffff', aa=True)
pylab.plot(x, y, color='red', alpha=0.5, aa=True, lw=1.0)
# Draw the projected trajectory
pylab.arrow(-25, -25, 50, 50, color='#999999', aa=True,
head_width=1.0, head_length=1.0)
for segment in intersection.geoms:
x, y = segment.xy
pylab.plot(x, y, color='red', aa=True, lw=1.5)
# Write the number of patches and the total patch area to the figure
pylab.text(-28, 25,
"Patches: %d/%d (%d), total length: %.1f" \
% (len(intercepts), len(patches.geoms),
len(intersection.geoms), intersection.length))
pylab.savefig('intersect.png')
| bsd-3-clause |
yask123/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
xiaoxiamii/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
pySTEPS/pysteps | pysteps/visualization/animations.py | 1 | 14780 | # -*- coding: utf-8 -*-
"""
pysteps.visualization.animations
================================
Functions to produce animations for pysteps.
.. autosummary::
:toctree: ../generated/
animate
"""
import os
import warnings
import matplotlib.pylab as plt
import pysteps as st
PRECIP_VALID_TYPES = ("ensemble", "mean", "prob")
PRECIP_DEPRECATED_ARGUMENTS = (
"units",
"colorbar",
"colorscale",
) # TODO: remove in version >= 1.6
MOTION_VALID_METHODS = ("quiver", "streamplot")
def animate(
precip_obs,
precip_fct=None,
timestamps_obs=None,
timestep_min=None,
motion_field=None,
ptype="ensemble",
motion_plot="quiver",
geodata=None,
title=None,
prob_thr=None,
display_animation=True,
nloops=1,
time_wait=0.2,
savefig=False,
fig_dpi=100,
fig_format="png",
path_outputs="",
precip_kwargs=None,
motion_kwargs=None,
map_kwargs=None,
**kwargs,
):
"""Function to animate observations and forecasts in pysteps.
    It also allows exporting the individual frames as figures, which
is useful for constructing animated GIFs or similar.
.. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
Parameters
----------
precip_obs: array-like
Three-dimensional array containing the time series of observed
precipitation fields.
precip_fct: array-like, optional
The three or four-dimensional (for ensembles) array
containing the time series of forecasted precipitation field.
timestamps_obs: list of datetimes, optional
List of datetime objects corresponding to the time stamps of
the fields in precip_obs.
timestep_min: float, optional
The time resolution in minutes of the forecast.
motion_field: array-like, optional
Three-dimensional array containing the u and v components of
the motion field.
motion_plot: string, optional
The method to plot the motion field. See plot methods in
:py:mod:`pysteps.visualization.motionfields`.
geodata: dictionary or None, optional
Dictionary containing geographical information about
the field.
If geodata is not None, it must contain the following key-value pairs:
.. tabularcolumns:: |p{1.5cm}|L|
+----------------+----------------------------------------------------+
| Key | Value |
+================+====================================================+
| projection | PROJ.4-compatible projection definition |
+----------------+----------------------------------------------------+
| x1 | x-coordinate of the lower-left corner of the data |
| | raster |
+----------------+----------------------------------------------------+
| y1 | y-coordinate of the lower-left corner of the data |
| | raster |
+----------------+----------------------------------------------------+
| x2 | x-coordinate of the upper-right corner of the data |
| | raster |
+----------------+----------------------------------------------------+
| y2 | y-coordinate of the upper-right corner of the data |
| | raster |
+----------------+----------------------------------------------------+
| yorigin | a string specifying the location of the first |
| | element in the data raster w.r.t. y-axis: |
| | 'upper' = upper border, 'lower' = lower border |
+----------------+----------------------------------------------------+
title: str or None, optional
If not None, print the string as title on top of the plot.
ptype: {'ensemble', 'mean', 'prob'}, str, optional
Type of the plot to animate. 'ensemble' = ensemble members,
'mean' = ensemble mean, 'prob' = exceedance probability
(using threshold defined in prob_thrs).
prob_thr: float, optional
Intensity threshold for the exceedance probability maps. Applicable
if ptype = 'prob'.
display_animation: bool, optional
If set to True, display the animation (set to False if only
interested in saving the animation frames).
nloops: int, optional
The number of loops in the animation.
time_wait: float, optional
The time in seconds between one frame and the next. Applicable
if display_animation is True.
savefig: bool, optional
If set to True, save the individual frames into path_outputs.
fig_dpi: float, optional
The resolution in dots per inch. Applicable if savefig is True.
fig_format: str, optional
Filename extension. Applicable if savefig is True.
path_outputs: string, optional
Path to folder where to save the frames. Applicable if savefig is True.
precip_kwargs: dict, optional
Optional parameters that are supplied to
:py:func:`pysteps.visualization.precipfields.plot_precip_field`.
motion_kwargs: dict, optional
Optional parameters that are supplied to
:py:func:`pysteps.visualization.motionfields.quiver` or
:py:func:`pysteps.visualization.motionfields.streamplot`.
map_kwargs: dict, optional
Optional parameters that need to be passed to
:py:func:`pysteps.visualization.basemaps.plot_geography`.
Returns
-------
None
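    Examples
    --------
    A minimal usage sketch (illustrative only; ``precip_obs`` is assumed to be a
    (time, m, n) array of observed fields and ``obs_times`` a matching list of
    datetime objects):
    >>> animate(precip_obs, timestamps_obs=obs_times,
    ...         display_animation=False, savefig=True, path_outputs="frames")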
"""
if precip_kwargs is None:
precip_kwargs = {}
if motion_kwargs is None:
motion_kwargs = {}
if map_kwargs is None:
map_kwargs = {}
if precip_fct is not None:
if len(precip_fct.shape) == 3:
precip_fct = precip_fct[None, ...]
n_lead_times = precip_fct.shape[1]
n_members = precip_fct.shape[0]
else:
n_lead_times = 0
n_members = 1
if title is not None and isinstance(title, str):
title_first_line = title + "\n"
else:
title_first_line = ""
if motion_plot not in MOTION_VALID_METHODS:
raise ValueError(
f"Invalid motion plot method '{motion_plot}'."
f"Supported: {str(MOTION_VALID_METHODS)}"
)
if ptype not in PRECIP_VALID_TYPES:
raise ValueError(
f"Invalid precipitation type '{ptype}'."
f"Supported: {str(PRECIP_VALID_TYPES)}"
)
# TODO: remove in version >= 1.6
if "type" in kwargs:
warnings.warn(
"The 'type' keyword will be deprecated in version 1.6. "
"Use 'ptype' instead."
)
ptype = kwargs.get("type")
# TODO: remove in version >= 1.6
if "timestamps" in kwargs:
warnings.warn(
"The 'timestamps' keyword will be deprecated in version 1.6. "
"Use 'timestamps_obs' instead."
)
timestamps_obs = kwargs.get("timestamps")
# TODO: remove in version >= 1.6
if "plotanimation" in kwargs:
warnings.warn(
"The 'plotanimation' keyword will be deprecated in version 1.6. "
"Use 'display_animation' instead."
)
display_animation = kwargs.get("timestamps")
# TODO: remove in version >= 1.6
for depr_key in PRECIP_DEPRECATED_ARGUMENTS:
if depr_key in kwargs:
warnings.warn(
f"The {depr_key} argument will be deprecated in version 1.6. "
"Add it to 'precip_kwargs' instead."
)
precip_kwargs[depr_key] = kwargs.get(depr_key)
if timestamps_obs is not None:
if len(timestamps_obs) != precip_obs.shape[0]:
raise ValueError(
f"The number of timestamps does not match the size of precip_obs: "
f"{len(timestamps_obs)} != {precip_obs.shape[0]}"
)
if precip_fct is not None:
reftime_str = timestamps_obs[-1].strftime("%Y%m%d%H%M")
else:
reftime_str = timestamps_obs[0].strftime("%Y%m%d%H%M")
else:
reftime_str = None
if ptype == "prob" and prob_thr is None:
raise ValueError("ptype 'prob' needs a prob_thr value")
if ptype != "ensemble":
n_members = 1
n_obs = precip_obs.shape[0]
loop = 0
while loop < nloops:
for n in range(n_members):
for i in range(n_obs + n_lead_times):
plt.clf()
# Observations
if i < n_obs and (display_animation or n == 0):
title = title_first_line + "Analysis"
if timestamps_obs is not None:
title += (
f" valid for {timestamps_obs[i].strftime('%Y-%m-%d %H:%M')}"
)
plt.clf()
if ptype == "prob":
prob_field = st.postprocessing.ensemblestats.excprob(
precip_obs[None, i, ...], prob_thr
)
ax = st.plt.plot_precip_field(
prob_field,
ptype="prob",
geodata=geodata,
probthr=prob_thr,
title=title,
map_kwargs=map_kwargs,
**precip_kwargs,
)
else:
ax = st.plt.plot_precip_field(
precip_obs[i, :, :],
geodata=geodata,
title=title,
map_kwargs=map_kwargs,
**precip_kwargs,
)
if motion_field is not None:
if motion_plot == "quiver":
st.plt.quiver(
motion_field, ax=ax, geodata=geodata, **motion_kwargs
)
elif motion_plot == "streamplot":
st.plt.streamplot(
motion_field, ax=ax, geodata=geodata, **motion_kwargs
)
if savefig & (loop == 0):
figtags = [reftime_str, ptype, f"f{i:02d}"]
figname = "_".join([tag for tag in figtags if tag])
filename = os.path.join(path_outputs, f"{figname}.{fig_format}")
plt.savefig(filename, bbox_inches="tight", dpi=fig_dpi)
print("saved: ", filename)
# Forecasts
elif i >= n_obs and precip_fct is not None:
title = title_first_line + "Forecast"
if timestamps_obs is not None:
title += f" valid for {timestamps_obs[-1].strftime('%Y-%m-%d %H:%M')}"
if timestep_min is not None:
title += " +%02d min" % ((1 + i - n_obs) * timestep_min)
else:
title += " +%02d" % (1 + i - n_obs)
plt.clf()
if ptype == "prob":
prob_field = st.postprocessing.ensemblestats.excprob(
precip_fct[:, i - n_obs, :, :], prob_thr
)
ax = st.plt.plot_precip_field(
prob_field,
ptype="prob",
geodata=geodata,
probthr=prob_thr,
title=title,
map_kwargs=map_kwargs,
**precip_kwargs,
)
elif ptype == "mean":
ens_mean = st.postprocessing.ensemblestats.mean(
precip_fct[:, i - n_obs, :, :]
)
ax = st.plt.plot_precip_field(
ens_mean,
geodata=geodata,
title=title,
map_kwargs=map_kwargs,
**precip_kwargs,
)
else:
ax = st.plt.plot_precip_field(
precip_fct[n, i - n_obs, ...],
geodata=geodata,
title=title,
map_kwargs=map_kwargs,
**precip_kwargs,
)
if motion_field is not None:
if motion_plot == "quiver":
st.plt.quiver(
motion_field, ax=ax, geodata=geodata, **motion_kwargs
)
elif motion_plot == "streamplot":
st.plt.streamplot(
motion_field, ax=ax, geodata=geodata, **motion_kwargs
)
if ptype == "ensemble" and n_members > 1:
plt.text(
0.01,
0.99,
"m %02d" % (n + 1),
transform=ax.transAxes,
ha="left",
va="top",
)
if savefig & (loop == 0):
figtags = [reftime_str, ptype, f"f{i:02d}", f"m{n + 1:02d}"]
figname = "_".join([tag for tag in figtags if tag])
filename = os.path.join(path_outputs, f"{figname}.{fig_format}")
plt.savefig(filename, bbox_inches="tight", dpi=fig_dpi)
print("saved: ", filename)
if display_animation:
plt.pause(time_wait)
if display_animation:
plt.pause(2 * time_wait)
loop += 1
plt.close()
| bsd-3-clause |
mjescobar/RF_Estimation | Clustering/clustering/SpectralClustering.py | 2 | 6225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SpectralClustering.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs SpectralClustering using scikit-learn
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
from sklearn.cluster import SpectralClustering
import argparse #argument parsing
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
from sklearn import metrics
clustersColours = ['#fcfa00', '#ff0000', '#820c2c', '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff','#0c820e','#28ea04','#ea8404','#c8628f','#6283ff','#5b6756','#0c8248','k','#820cff','#932c11','#002c11','#829ca7']
def main():
	parser = argparse.ArgumentParser(prog='SpectralClustering.py',
		description='Performs Spectral Clustering using scikit-learn',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='5', choices=[3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--pcaComponents',
help='Number of components for PCA',
type=int, default='4', required=False)
parser.add_argument('--doPCA',
help='Performs clusterings with PCA or not',
type=bool, default=False, required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
	#plus 5, as we append the two radii of the RF ellipse, its angle,
	#and its x and y position
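	#Illustrative layout of one row (not in the original script), for framesNumber = 20:
	#  columns 0..19 -> Gaussian-smoothed temporal STA curve
	#  column 20     -> ellipse radius A
	#  column 21     -> ellipse radius B
	#  column 22     -> ellipse angle
	#  columns 23,24 -> ellipse centre (x, y)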
dataCluster = np.zeros((1,framesNumber+5))
units=[]
dato=np.zeros((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#should we use the not-gaussian-fitted data for clustering?
dataUnitGauss = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#A radius of the RF ellipse
dato[0]=fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitGauss,dato),1)
#B radius of the RF ellipse
dato[0]=fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
dato[0]=fitResult[0][1]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
dato[0]=fitResult[0][4]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
dato[0]=fitResult[0][5]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
dataCluster = np.append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:framesNumber+2]
sc = SpectralClustering(n_clusters=clustersNumber, eigen_solver=None, random_state=None, n_init=10, gamma=1.0, affinity='nearest_neighbors', n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1, kernel_params=None)
sc.fit(data)
labels = sc.labels_
fit = metrics.silhouette_score(data, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'no_pca.png',clustersColours, fit)
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,framesNumber+5))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,framesNumber+5))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
# remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla,outputFolder+'Grilla_'+str(clusterId)+'.png',clustersColours[clusterId],framesNumber,xSize,ySize)
rfe.graficaCluster(labels, dataGrilla[:,0:framesNumber-1], outputFolder+'cluster_'+str(clusterId)+'.png',clustersColours[clusterId])
rfe.guardaClustersIDs(outputFolder,units,labels,outputFolder+'clustering_no_pca.csv')
if args.doPCA:
pca = PCA(n_components=args.pcaComponents)
newData = pca.fit_transform(data)
		sc.fit(newData)
		labels = sc.labels_
fit = metrics.silhouette_score(newData, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'pca.png',clustersColours,fit)
rfe.guardaClustersIDs(outputFolder,units,labels,outputFolder+'clustering_pca.csv')
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
nickcdryan/rep | rep/metaml/folding.py | 3 | 8928 | """
This is a specific meta-algorithm based on the idea of cross-validation.
"""
from __future__ import division, print_function, absolute_import
import numpy
from sklearn import clone
from six.moves import zip
from . import utils
from sklearn.cross_validation import KFold
from sklearn.utils.validation import check_random_state
from .factory import train_estimator
from ..estimators.interface import Classifier
from ..estimators.utils import check_inputs
__author__ = 'Tatiana Likhomanenko'
class FoldingClassifier(Classifier):
"""
    This meta-classifier implements the folding algorithm:
    * training data is split into n equal parts;
    * then, n times, the union of n-1 parts is used to train a classifier;
    * at the end we have n estimators, which are used to classify new events.
    To build unbiased predictions for data, pass the **same** dataset (with the same order of events)
    as in training to `predict`, `predict_proba` or `staged_predict_proba`; in that case
    each event is predicted by the base classifier that didn't use that event during training.
    To use information from not one, but several classifiers during predictions,
    provide an appropriate voting function.
Parameters:
-----------
:param sklearn.BaseEstimator base_estimator: base classifier, which will be used for training
:param int n_folds: count of folds
:param features: features used in training
:type features: None or list[str]
:param ipc_profile: profile for IPython cluster, None to compute locally.
:type ipc_profile: None or str
:param random_state: random state for reproducibility
:type random_state: None or int or RandomState
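    Example (an illustrative sketch, not taken from the original documentation;
    ``X``, ``y`` and ``X_test`` stand for user-supplied pandas/numpy data):
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> folder = FoldingClassifier(GradientBoostingClassifier(), n_folds=3)
    >>> folder.fit(X, y)
    >>> oof_proba = folder.predict_proba(X)  # same data, same order: out-of-fold predictions
    >>> mean_vote = lambda probs: probs.mean(axis=0)
    >>> test_proba = folder.predict_proba(X_test, vote_function=mean_vote)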
"""
def __init__(self,
base_estimator,
n_folds=2,
random_state=None,
features=None,
ipc_profile=None):
super(FoldingClassifier, self).__init__(features=features)
self.estimators = []
self.ipc_profile = ipc_profile
self.n_folds = n_folds
self.base_estimator = base_estimator
self._folds_indices = None
self.random_state = random_state
self._random_number = None
def _get_folds_column(self, length):
if self._random_number is None:
self._random_number = check_random_state(self.random_state).randint(0, 100000)
folds_column = numpy.zeros(length)
for fold_number, (_, folds_indices) in enumerate(
KFold(length, self.n_folds, shuffle=True, random_state=self._random_number)):
folds_column[folds_indices] = fold_number
return folds_column
def fit(self, X, y, sample_weight=None):
"""
        Train the classifier: several base classifiers will be trained on overlapping
        subsets of the training dataset.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
"""
if hasattr(self.base_estimator, 'features'):
assert self.base_estimator.features is None, 'Base estimator must have None features! ' \
'Use features parameter in Folding to fix it'
X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=True)
X = self._get_features(X)
self._set_classes(y)
folds_column = self._get_folds_column(len(X))
for _ in range(self.n_folds):
self.estimators.append(clone(self.base_estimator))
if sample_weight is None:
weights_iterator = (None for _ in range(self.n_folds))
else:
weights_iterator = (sample_weight[folds_column != index] for index in range(self.n_folds))
result = utils.map_on_cluster(self.ipc_profile, train_estimator,
range(len(self.estimators)),
self.estimators,
(X.iloc[folds_column != index, :].copy() for index in range(self.n_folds)),
(y[folds_column != index] for index in range(self.n_folds)),
weights_iterator)
for status, data in result:
if status == 'success':
name, classifier, spent_time = data
self.estimators[name] = classifier
else:
print('Problem while training on the node, report:\n', data)
return self
def _get_estimators_proba(self, estimator, data):
try:
return estimator.predict_proba(data)
except AttributeError:
probabilities = numpy.zeros(shape=(len(data), self.n_classes_))
labels = estimator.predict(data)
probabilities[numpy.arange(len(labels)), labels] = 1
return probabilities
def predict(self, X, vote_function=None):
"""
Predict labels. To get unbiased predictions, you can pass training dataset
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used. Parameters: numpy.ndarray [n_classifiers, n_samples]
:type vote_function: None or function, if None, will use folding scheme.
:rtype: numpy.array of shape [n_samples, n_classes] with labels
"""
proba = self.predict_proba(X, vote_function=vote_function)
return self.classes_.take(numpy.argmax(proba, axis=1), axis=0)
def predict_proba(self, X, vote_function=None):
"""
Predict probabilities. To get unbiased predictions, you can pass training dataset
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then self.vote_function is used. Parameters: numpy.ndarray [n_classifiers, n_samples, n_classes]
:type vote_function: None or function
:rtype: numpy.array of shape [n_samples, n_classes] with probabilities
"""
if vote_function is not None:
print('Using voting KFold prediction')
X = self._get_features(X)
probabilities = []
for classifier in self.estimators:
probabilities.append(self._get_estimators_proba(classifier, X))
# probabilities: [n_classifiers, n_samples, n_classes], reduction over 0th axis
probabilities = numpy.array(probabilities)
return vote_function(probabilities)
else:
print('KFold prediction using folds column')
X = self._get_features(X)
folds_column = self._get_folds_column(len(X))
probabilities = numpy.zeros(shape=(len(X), self.n_classes_))
for fold in range(self.n_folds):
prob = self._get_estimators_proba(self.estimators[fold], X.iloc[folds_column == fold, :])
probabilities[folds_column == fold] = prob
return probabilities
def staged_predict_proba(self, X, vote_function=None):
"""
Predict probabilities on each stage. To get unbiased predictions, you can pass training dataset
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then the folding scheme is used.
:type vote_function: None or function
:return: iterator for numpy.array of shape [n_samples, n_classes] with probabilities
"""
if vote_function is not None:
print('Using voting KFold prediction')
X = self._get_features(X)
iterators = [estimator.staged_predict_proba(X) for estimator in self.estimators]
for fold_prob in zip(*iterators):
probabilities = numpy.array(fold_prob)
yield vote_function(probabilities)
else:
print('Default prediction')
X = self._get_features(X)
folds_column = self._get_folds_column(len(X))
iterators = [self.estimators[fold].staged_predict_proba(X.iloc[folds_column == fold, :])
for fold in range(self.n_folds)]
for fold_prob in zip(*iterators):
probabilities = numpy.zeros(shape=(len(X), self.n_classes_))
for fold in range(self.n_folds):
probabilities[folds_column == fold] = fold_prob[fold]
yield probabilities
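# --- Editorial sketch (added; not part of the original module) ---
# predict_proba/staged_predict_proba accept a vote_function that receives the stacked
# per-fold predictions as a numpy.ndarray of shape [n_classifiers, n_samples, n_classes];
# averaging over the fold axis is the simplest way to combine the folds on unseen data.
# The helper name is an assumption and it reuses this module's numpy import.
def mean_vote(fold_probabilities):
    """Combine fold predictions by averaging class probabilities over the fold axis."""
    return numpy.mean(fold_probabilities, axis=0)
# Illustrative use (folder, X_train, y_train, X_new are assumptions):
# folder.fit(X_train, y_train)
# folder.predict_proba(X_train) # unbiased folding-scheme predictions
# folder.predict_proba(X_new, vote_function=mean_vote) # combine all folds by voting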
| apache-2.0 |
Dallinger/Dallinger | dallinger/data.py | 1 | 14062 | """Data-handling tools."""
from .config import get_config
import csv
import errno
import io
import logging
import os
import shutil
import six
import subprocess
import tempfile
import warnings
from zipfile import ZipFile, ZIP_DEFLATED
import botocore
import boto3
import hashlib
import postgres_copy
import psycopg2
from dallinger.compat import open_for_csv
from dallinger.heroku.tools import HerokuApp
from dallinger import db
from dallinger import models
logger = logging.getLogger(__name__)
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
try:
import tablib
except ImportError:
logger.debug("Failed to import tablib.")
class S3BucketUnavailable(Exception):
"""No Amazon S3 bucket could be found based on the user's
configuration.
"""
table_names = [
"info",
"network",
"node",
"notification",
"participant",
"question",
"transformation",
"transmission",
"vector",
]
def find_experiment_export(app_id):
"""Attempt to find a zipped export of an experiment with the ID provided
and return its path. Returns None if not found.
Search order:
1. local "data" subdirectory
2. user S3 bucket
3. Dallinger S3 bucket
"""
# Check locally first
cwd = os.getcwd()
data_filename = "{}-data.zip".format(app_id)
path_to_data = os.path.join(cwd, "data", data_filename)
if os.path.exists(path_to_data):
try:
Data(path_to_data)
except IOError:
from dallinger import logger
logger.exception(
"Error reading local data file {}, checking remote.".format(
path_to_data
)
)
else:
return path_to_data
# Get remote file instead
path_to_data = os.path.join(tempfile.mkdtemp(), data_filename)
buckets = [user_s3_bucket(), dallinger_s3_bucket()]
for bucket in buckets:
if bucket is None:
continue
try:
bucket.download_file(data_filename, path_to_data)
except botocore.exceptions.ClientError:
pass
else:
return path_to_data
def load(app_id):
"""Load the data from wherever it is found."""
path_to_data = find_experiment_export(app_id)
if path_to_data is None:
raise IOError("Dataset {} could not be found.".format(app_id))
return Data(path_to_data)
def dump_database(id):
"""Dump the database to a temporary directory."""
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
FNULL = open(os.devnull, "w")
heroku_app = HerokuApp(dallinger_uid=id, output=FNULL)
heroku_app.backup_capture()
heroku_app.backup_download()
for filename in os.listdir(tmp_dir):
if filename.startswith("latest.dump"):
os.rename(filename, "database.dump")
os.chdir(current_dir)
return os.path.join(tmp_dir, "database.dump")
def backup(id):
"""Backup the database to S3."""
filename = dump_database(id)
key = "{}.dump".format(id)
bucket = user_s3_bucket()
bucket.upload_file(filename, key)
return _generate_s3_url(bucket, key)
def registration_key(id):
return "{}.reg".format(id)
def register(id, url=None):
"""Register a UUID key in the global S3 bucket."""
bucket = registration_s3_bucket()
if bucket is None:
return
key = registration_key(id)
obj = bucket.Object(key)
obj.put(Body=url or "missing")
return _generate_s3_url(bucket, key)
def is_registered(id):
"""Check if a UUID is already registered"""
bucket = registration_s3_bucket()
if bucket is None:
return False
key = registration_key(id)
found_keys = set(obj.key for obj in bucket.objects.filter(Prefix=key))
return key in found_keys
def copy_heroku_to_local(id):
"""Copy a Heroku database locally."""
heroku_app = HerokuApp(dallinger_uid=id)
try:
subprocess.call(["dropdb", heroku_app.name])
except Exception:
pass
heroku_app.pg_pull()
def copy_db_to_csv(dsn, path, scrub_pii=False):
"""Copy a local database to a set of CSV files."""
if "postgresql://" in dsn or "postgres://" in dsn:
conn = psycopg2.connect(dsn=dsn)
else:
conn = psycopg2.connect(database=dsn, user="dallinger")
cur = conn.cursor()
for table in table_names:
csv_path = os.path.join(path, "{}.csv".format(table))
with open(csv_path, "w") as f:
sql = "COPY {} TO STDOUT WITH CSV HEADER".format(table)
cur.copy_expert(sql, f)
conn.close()
if scrub_pii:
_scrub_participant_table(path)
# Backwards compatibility for imports
copy_local_to_csv = copy_db_to_csv
def _scrub_participant_table(path_to_data):
"""Scrub PII from the given participant table."""
path = os.path.join(path_to_data, "participant.csv")
with open_for_csv(path, "r") as input, open("{}.0".format(path), "w") as output:
reader = csv.reader(input)
writer = csv.writer(output)
headers = next(reader)
writer.writerow(headers)
for i, row in enumerate(reader):
row[headers.index("worker_id")] = row[headers.index("id")]
row[headers.index("unique_id")] = "{}:{}".format(
row[headers.index("id")], row[headers.index("assignment_id")]
)
writer.writerow(row)
os.rename("{}.0".format(path), path)
def export(id, local=False, scrub_pii=False):
"""Export data from an experiment."""
print("Preparing to export the data...")
if local:
db_uri = db.db_url
else:
db_uri = HerokuApp(id).db_uri
return export_db_uri(id, db_uri=db_uri, local=local, scrub_pii=scrub_pii)
def export_db_uri(id, db_uri, local, scrub_pii):
# Create the data package if it doesn't already exist.
subdata_path = os.path.join("data", id, "data")
try:
os.makedirs(subdata_path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(subdata_path):
raise
# Copy in the data.
copy_db_to_csv(db_uri, subdata_path, scrub_pii=scrub_pii)
# Copy the experiment code into a code/ subdirectory.
try:
shutil.copyfile(
os.path.join("snapshots", id + "-code.zip"),
os.path.join("data", id, id + "-code.zip"),
)
except Exception:
pass
# Copy in the DATA readme.
# open(os.path.join(id, "README.txt"), "a").close()
# Save the experiment id.
with open(os.path.join("data", id, "experiment_id.md"), "a+") as file:
file.write(id)
# Zip data
src = os.path.join("data", id)
dst = os.path.join("data", id + "-data.zip")
archive_data(id, src, dst)
cwd = os.getcwd()
data_filename = "{}-data.zip".format(id)
path_to_data = os.path.join(cwd, "data", data_filename)
# Backup data on S3 unless run locally
if not local:
bucket = user_s3_bucket()
config = get_config()
try:
bucket.upload_file(path_to_data, data_filename)
registration_url = _generate_s3_url(bucket, data_filename)
s3_console_url = (
f"https://s3.console.aws.amazon.com/s3/object/{bucket.name}"
f"?region={config.aws_region}&prefix={data_filename}"
)
# Register experiment UUID with dallinger
register(id, registration_url)
print(
"A copy of your export was saved also to Amazon S3:\n"
f" - bucket name: {bucket.name}\n"
f" - S3 console URL: {s3_console_url}"
)
except AttributeError:
raise S3BucketUnavailable("Could not find an S3 bucket!")
return path_to_data
def bootstrap_db_from_zip(zip_path, engine):
"""Given a path to a zip archive created with `export()`, first empty the
database, then recreate it based on the data stored in the included .csv
files.
"""
db.init_db(drop_all=True, bind=engine)
ingest_zip(zip_path, engine=engine)
def ingest_zip(path, engine=None):
"""Given a path to a zip file created with `export()`, recreate the
database with the data stored in the included .csv files.
"""
import_order = [
"network",
"participant",
"node",
"info",
"notification",
"question",
"transformation",
"vector",
"transmission",
]
with ZipFile(path, "r") as archive:
filenames = archive.namelist()
for name in import_order:
filename = [f for f in filenames if name in f][0]
model_name = name.capitalize()
model = getattr(models, model_name)
file = archive.open(filename)
if six.PY3:
file = io.TextIOWrapper(file, encoding="utf8", newline="")
ingest_to_model(file, model, engine)
def fix_autoincrement(engine, table_name):
"""Auto-increment pointers are not updated when IDs are set explicitly,
so we manually update the pointer so subsequent inserts work correctly.
"""
engine.execute("select setval('{0}_id_seq', max(id)) from {0}".format(table_name))
def ingest_to_model(file, model, engine=None):
"""Load data from a CSV file handle into storage for a
SQLAlchemy model class.
"""
if engine is None:
engine = db.engine
reader = csv.reader(file)
columns = tuple('"{}"'.format(n) for n in next(reader))
postgres_copy.copy_from(
file, model, engine, columns=columns, format="csv", HEADER=False
)
fix_autoincrement(engine, model.__table__.name)
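# Editorial usage sketch (added comment; the path is an assumption): ingest_to_model
# expects an open text-mode handle whose CSV header row matches the model's columns,
# e.g. ingest_to_model(open("data/participant.csv"), models.Participant, engine=db.engine)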
def archive_data(id, src, dst):
print("Zipping up the package...")
with ZipFile(dst, "w", ZIP_DEFLATED, allowZip64=True) as zf:
for root, dirs, files in os.walk(src):
for file in files:
filename = os.path.join(root, file)
arcname = filename.replace(src, "").lstrip("/")
zf.write(filename, arcname)
shutil.rmtree(src)
print(f"Done. Local export available in {dst}")
def _get_canonical_aws_user_id(s3):
return s3.meta.client.list_buckets()["Owner"]["ID"]
def _get_or_create_s3_bucket(s3, name):
"""Get an S3 bucket resource after making sure it exists"""
exists = True
try:
s3.meta.client.head_bucket(Bucket=name)
except botocore.exceptions.ClientError as e:
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
exists = False
else:
raise
if not exists:
s3.create_bucket(Bucket=name)
return s3.Bucket(name)
def _generate_s3_url(bucket, key):
return "https://{}.s3.amazonaws.com/{}".format(bucket.name, key)
def user_s3_bucket(canonical_user_id=None):
"""Get the user's S3 bucket."""
s3 = _s3_resource()
if not canonical_user_id:
try:
canonical_user_id = _get_canonical_aws_user_id(s3)
except botocore.exceptions.ClientError:
return None
s3_bucket_name = "dallinger-{}".format(
hashlib.sha256(canonical_user_id.encode("utf8")).hexdigest()[0:8]
)
return _get_or_create_s3_bucket(s3, s3_bucket_name)
def dallinger_s3_bucket():
"""The public `dallinger` S3 bucket."""
s3 = _s3_resource(dallinger_region=True)
return s3.Bucket("dallinger")
def registration_s3_bucket():
"""The public write-only `dallinger-registration` S3 bucket."""
config = get_config()
if not config.ready:
config.load()
if config.get("enable_global_experiment_registry", False):
s3 = _s3_resource(dallinger_region=True)
return s3.Bucket("dallinger-registrations")
def _s3_resource(dallinger_region=False):
"""A boto3 S3 resource using the AWS keys in the config."""
config = get_config()
if not config.ready:
config.load()
region = "us-east-1" if dallinger_region else config.get("aws_region")
return boto3.resource(
"s3",
region_name=region,
aws_access_key_id=config.get("aws_access_key_id"),
aws_secret_access_key=config.get("aws_secret_access_key"),
)
class Data(object):
"""Dallinger data object."""
def __init__(self, URL):
self.source = URL
if self.source.endswith(".zip"):
input_zip = ZipFile(URL)
tmp_dir = tempfile.mkdtemp()
input_zip.extractall(tmp_dir)
for tab in table_names:
setattr(
self,
"{}s".format(tab),
Table(os.path.join(tmp_dir, "data", "{}.csv".format(tab))),
)
class Table(object):
"""Dallinger data-table object."""
def __init__(self, path):
self.tablib_dataset = tablib.Dataset().load(open(path).read(), "csv")
@property
def csv(self):
"""Comma-separated values."""
return self.tablib_dataset.csv
@property
def dict(self):
"""A Python dictionary."""
return self.tablib_dataset.dict[0]
@property
def df(self):
"""A pandas DataFrame."""
return self.tablib_dataset.df
@property
def html(self):
"""An HTML table."""
return self.tablib_dataset.html
@property
def latex(self):
"""A LaTeX table."""
return self.tablib_dataset.latex
@property
def ods(self):
"""An OpenDocument Spreadsheet."""
return self.tablib_dataset.ods
@property
def tsv(self):
"""Tab-separated values."""
return self.tablib_dataset.tsv
@property
def xls(self):
"""Legacy Excel spreadsheet format."""
return self.tablib_dataset.xls
@property
def xlsx(self):
"""Modern Excel spreadsheet format."""
return self.tablib_dataset.xlsx
@property
def yaml(self):
"""YAML."""
return self.tablib_dataset.yaml
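# Editorial usage sketch (added comment; the file name is an assumption):
# data = Data("data/my-app-id-data.zip")
# participants = data.participants.df # pandas DataFrame of the participant table
# print(data.networks.csv) # or tsv/html/latex/... via the other properties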
| mit |
IntelLabs/hpat | examples/dataframe/rolling/dataframe_rolling_corr.py | 1 | 1945 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def df_rolling_corr():
df = pd.DataFrame({'A': [3, 3, 3, 5, 8], 'B': [-3, -3, -3, -5, -8]})
other = pd.DataFrame({'A': [3, 4, 4, 4, 8], 'B': [-3, -4, -4, -4, -8]})
out_df = df.rolling(4).corr(other)
# Expect DataFrame of
# {'A': [NaN, NaN, NaN, 0.333333, 0.916949],
# 'B': [NaN, NaN, NaN, 0.333333, 0.916949]}
return out_df
print(df_rolling_corr())
| bsd-2-clause |
mdzik/bearded-octo-wookie | BasicPrograms/lbm.py | 1 | 9200 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 10:04:30 2015
@author: mdzikowski
"""
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 22:37:49 2015
@author: michal
"""
import numpy as np
import matplotlib.pyplot as plt
# all basic const and tools for lbm are here
import bearded_octo_wookie.lbm as lbm
import bearded_octo_wookie.MRT as MRT
import bearded_octo_wookie.ZouHeBC as BC
tau0 = 1.
dx0=1.
x0 = 0.
y0 = 0.
nx0 = 25
ny0 = 100
nt = 2000
e0 = lbm.e
e = lbm.e
e_opp = lbm.e_opp
forAll9do = lbm.forAll9do
W = lbm.W
x0,y0 = np.meshgrid(np.arange(nx0), np.arange(ny0), indexing='ij')
#x0=x0.T
#y0=y0.T
################################################################
## INIT GRID 0
iy0n = list()
ix0n = list()
ix00,iy00 = np.meshgrid(np.arange(nx0), np.arange(ny0), indexing='ij')
#ix00=ix00.T
#iy00=iy00.T
bx0 = list()
by0 = list()
#for ix,iy in zip(ix00.reshape(nx0*ny0), iy00.reshape(nx0*ny0)):
# #if m == 1:
# if (ix == 0 and iy==ny0-1) or (ix == nx0-1 and iy==ny0-1) or (ix == 0 and iy==0) or (ix == nx0-1 and iy==0):
# bx0.append(ix)
# by0.append(iy)
for i in range(0,9):
ixt = np.roll(np.roll(ix00[:,:],shift=e0[i][1],axis=0),shift=e0[i][0],axis=1)
iyt = np.roll(np.roll(iy00[:,:],shift=e0[i][1],axis=0),shift=e0[i][0],axis=1)
ix0n.append(ixt)
iy0n.append(iyt)
ix0= np.array(ix0n)
iy0= np.array(iy0n)
f_in0 = np.zeros((nx0,ny0,9))
f_out0 = np.zeros_like(f_in0)
def init(_i, _W, _e, _rho, _U, _fin):
_fin[:,:,_i] = _W[_i] * _rho[:,:]
cu = 3. * ( _U[:,:,0] * _e[_i,0] + _U[:,:,1] * _e[_i,1])
_fin[:,:,_i] = _W[_i] * _rho[:,:] * (1. + cu[:,:] + 0.5*cu[:,:]*cu[:,:] - (3./2.) * ( _U[:,:,0]**2 + _U[:,:,1]**2 ) )
def stream(_i, _ox, _oy, _fin, _fou):
_fin[:,:,_i] = _fou[_ox[_i],_oy[_i],_i]
def BGK(_i, _U, _rho, _e, _W, _tau, _F, _fin, _fou):
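# Editorial comments (added): feq1 below is the second-order discrete equilibrium
# f_i^eq = W_i * rho * (1 + 3 e_i.u + 4.5 (e_i.u)^2 - 1.5 |u|^2); feq2 is the same
# equilibrium evaluated at the force-shifted velocity u + F, so the collision relaxes
# _fin towards feq1 at rate _tau (used here as a relaxation frequency) and applies the
# body force through the equilibrium shift (feq2 - feq1).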
cu = 3. * ( _U[:,:,0] * _e[_i,0] + _U[:,:,1] * _e[_i,1])
feq1 = _W[_i] * _rho[:,:] * (1. + cu[:,:] + 0.5*cu[:,:]*cu[:,:] - (3./2.) * ( _U[:,:,0]**2 + _U[:,:,1]**2 ) )
cu = 3. * ( (_U[:,:,0]+_F[:,:,0]) * _e[_i,0] + (_U[:,:,1] + _F[:,:,1]) * _e[_i,1])
feq2 = _W[_i] * _rho[:,:] * (1. + cu[:,:] + 0.5*cu[:,:]*cu[:,:] - (3./2.) * ( (_U[:,:,0]+_F[:,:,0])**2 + (_U[:,:,1]+_F[:,:,1])**2 ) )
_fou[:,:,_i] = _fin[:,:,_i] + (_tau) * ( feq1[:,:] - _fin[:,:,_i] ) + (feq2[:,:]-feq1[:,:])
def BB(_i, _bx, _by, _e_opp, _fin, _fou):
for bx,by in zip(_bx,_by):
_fou[bx,by,_i] = _fin[bx,by,_e_opp[_i]]
def MRT_meq(_i, _U, _rho, _fmeq, _meq):
_meq[_i] = _fmeq[_i](_rho, _U[:,:,0], _U[:,:,1])
def applyBC(A, f):
shape = f.shape
_f = np.ones((shape[0], shape[1]+1))
_f[:,:-1] = f
return A.dot(_f.T).T
def applyCornerBC(A, f):
shape = f.shape
_f = np.ones(shape[0]+1)
_f[:-1] = f
return A.dot(_f.T).T
def applyWideBC(A, f):
shape = f.shape
_f = np.ones((shape[0], shape[1]+1))
_f[:,:-1] = f
assert len(A[:,0,0]) == len(_f[:,0])
for i, ff in enumerate(_f):
f[i,:] = A[i,:,:].dot(_f[i,:]).T
return f
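# Editorial note (added comment): the apply*BC helpers append a constant 1 to each
# distribution vector, so the boundary matrices built from BC.getUBc/BC.getCornerUBc
# appear to act as affine maps, with the extra column carrying the prescribed
# boundary-velocity terms; applyWideBC applies one matrix per boundary node and
# applyCornerBC a single matrix to one corner node.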
meq0, M0 = MRT.getMRT(e0)
M0 = np.array(M0, dtype=float)
M0inv = np.linalg.inv(M0)
rho0 = np.ones_like(f_in0[:,:,0])
U0 = np.zeros_like(f_in0[:,:,:2])
forAll9do(init, W, e, rho0, U0, f_in0)
_tau = 1.
F0 = np.zeros_like(U0)
F0[:,:,0] = 0.0000
#==============================================================================
# BOUNDARY CONDYTIONS
#==============================================================================
#Uxm = 0.000
#Uym = 0.0001
#ubc = lambda x,y : (y/float(ny0) * Uxm, x/float(nx0) * Uym)
#ubc = lambda x,y : (x/float(nx0) * Uxm, y/float(ny0) * Uym)
Uscale = 0.0001
def ubc(x,y) :
x = x / float(nx0) - 0.5
y = y / float(ny0)- 0.5
return (-Uscale*y, Uscale*x)
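# Editorial note (added comment): with x and y rescaled to roughly [-0.5, 0.5], this
# boundary velocity u = Uscale * (-y, x) is a solid-body rotation about the domain
# centre, so the driven flow should spin at a rate set by Uscale.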
#==============================================================================
# x, y : 1d arrays
# an evenly spaced grid.
# u, v : 2d arrays
# x and y-velocities. Number of rows should match length of y, and the number of columns should match x.
#==============================================================================
#ux,uy = ubc(x0,y0)
#plt.streamplot(np.arange(ny0),np.arange(nx0),uy,ux )
#plt.show()
ubcC = ubc
#==============================================================================
# BC_fun_Left, BC_A_Left = BC.getUBc([0,1], 0., 0.)
# BC_fun_Right, BC_A_Right = BC.getUBc([0,-1], 0., 0.)
# BC_fun_Top, BC_A_Top = BC.getUBc([-1,0], 0., 0.0)
# BC_fun_Bottom, BC_A_Bottom = BC.getUBc([1,0], 0., 0.)
#==============================================================================
e_l = np.array([1,0])
e_r = np.array([-1,0])
e_t = np.array([0,-1])
e_b = np.array([0,1])
BC_fun_Left, BC_A_Left = BC.getUBc(e_l, 0., 0.)
BC_fun_Right, BC_A_Right = BC.getUBc(e_r, 0., 0.)
BC_fun_Top, BC_A_Top = BC.getUBc(e_t, 0., 0.0)
BC_fun_Bottom, BC_A_Bottom = BC.getUBc(e_b, 0., 0.)
BC_A_Right = list()
for y,x in zip(y0[1:-1,-1], x0[1:-1,-1]):
BC_A_Right.append( BC_fun_Right( *ubc(x,y) ) )
BC_A_Right = np.array(BC_A_Right)
BC_A_Left = list()
for y,x in zip(y0[1:-1,0], x0[1:-1,0]):
BC_A_Left.append( BC_fun_Left( *ubc(x,y) ) )
BC_A_Left = np.array(BC_A_Left)
BC_A_Top = list()
for y,x in zip(y0[-1,1:-1], x0[-1,1:-1]):
BC_A_Top.append( BC_fun_Top( *ubc(x,y) ) )
BC_A_Top = np.array(BC_A_Top)
BC_A_Bottom = list()
for y,x in zip(y0[0,1:-1], x0[0,1:-1]):
BC_A_Bottom.append( BC_fun_Bottom( *ubc(x,y) ) )
BC_A_Bottom = np.array(BC_A_Bottom)
BC_fun_Left_Top, BC_A_Left_Top = BC.getCornerUBc(e_l+e_t, 0., 0.)
BC_fun_Left_Bottom, BC_A_Left_Bottom = BC.getCornerUBc(e_l+e_b, 0., 0.)
BC_fun_Right_Top, BC_A_Right_Top = BC.getCornerUBc(e_r+e_t, 0., 0.)
BC_fun_Right_Bottom, BC_A_Right_Bottom = BC.getCornerUBc(e_r+e_b, 0., 0.)
for it in range(nt):
rho0[:,:] = np.sum( f_in0[:,:,:], 2 )
#==============================================================================
# ### left
f_in0[1:-1,0,:] = applyWideBC(BC_A_Left, f_in0[1:-1,0,:])
# ### right
f_in0[1:-1,-1,:] = applyWideBC(BC_A_Right, f_in0[1:-1,-1,:])
#
# ### bottom
f_in0[0,1:-1,:] = applyWideBC(BC_A_Bottom, f_in0[0,1:-1,:])
# ### top
f_in0[-1,1:-1,:] = applyWideBC(BC_A_Top, f_in0[-1,1:-1,:])
#==============================================================================
### left top
f_in0[-1,0,:] = applyCornerBC(BC_fun_Left_Top( rho0[-2,1], *ubcC(x0[-1,0], y0[-1,0] ) ) , f_in0[-1,0,:])
### left bottom
f_in0[0,0,:] = applyCornerBC(BC_fun_Left_Bottom( rho0[1,1], *ubcC(x0[0,0], y0[0,0] ) ), f_in0[0,0,:])
### right top
f_in0[-1,-1,:] = applyCornerBC(BC_fun_Right_Top( rho0[-2,-2], *ubcC(x0[-1,-1], y0[-1,-1] ) ), f_in0[-1,-1,:])
### right bottom
f_in0[0,-1,:] = applyCornerBC(BC_fun_Right_Bottom( rho0[1,-2], *ubcC(x0[0,-1], y0[0,-1] ) ), f_in0[0,-1,:])
rho0[:,:] = np.sum( f_in0[:,:,:], 2 )
U0[:,:] = 0.
for i in range(1,9):
U0[:,:,0] = U0[:,:,0] + e[i][0]*f_in0[:,:,i]
U0[:,:,1] = U0[:,:,1] + e[i][1]*f_in0[:,:,i]
U0[:,:,0] = U0[:,:,0] / rho0
U0[:,:,1] = U0[:,:,1] / rho0
#forAll9do(BGK, U0, rho0, e, W, tau0, F0, f_in0, f_out0)
for i,f in enumerate(f_in0):
f = f.T
m = (M0.dot(f))
meq_0 = meq0(rho0[i,:], U0[i,:,0], U0[i,:,1])
meq_1 = meq0(rho0[i,:], U0[i,:,0] + F0[i,:,0], U0[i,:,1] + F0[i,:,1])
f_out = M0inv.dot( m + (_tau) * ( meq_0 - m ) + (meq_1 - meq_0) )
f_out0[i,:,:] = f_out.T
if it > 0 and np.mod(it, 500) == 0:
rho0[:,:] = np.sum( f_in0[:,:,:], 2 )
U0[:,:] = 0.
for i in range(1,9):
U0[:,:,0] = U0[:,:,0] + e[i][0]*f_in0[:,:,i]
U0[:,:,1] = U0[:,:,1] + e[i][1]*f_in0[:,:,i]
U0[:,:,0] = U0[:,:,0] / rho0
U0[:,:,1] = U0[:,:,1] / rho0
iii = 0
plt.cla()
plt.subplot(2,2,1)
plt.contourf(x0,y0,U0[:,:,0])
#plt.imshow(U0[:,:,0],interpolation='nearest')
#print np.max(np.sqrt(U0[:,:,0]**2))
plt.colorbar()
plt.subplot(2,2,2)
plt.contourf(x0,y0,U0[:,:,1])
#plt.imshow(U0[:,:,1],interpolation='nearest')
print(np.max(np.sqrt(U0[:,:,1]**2)))
plt.colorbar()
plt.subplot(2,2,3)
#plt.contourf(ix00,iy00,np.sqrt(U0[:,:,0]**2 + U0[:,:,1]**2))
plt.quiver(x0,y0,U0[:,:,0], U0[:,:,1] )
plt.streamplot(np.arange(nx0),np.arange(ny0),U0[:,:,0].T,U0[:,:,1].T)
plt.subplot(2,2,4)
plt.contourf(x0,y0,rho0[:,:])
#plt.imshow(rho0[:,:],interpolation='nearest')
plt.colorbar()
#==============================================================================
# plt.figure()
# plt.plot(U0[:,0,1])
# plt.plot(U0[:,0,0])
#==============================================================================
plt.show()
#forAll9do(BB, bx0, by0, e_opp, f_in0, f_out0)
forAll9do(stream, ix0, iy0, f_in0, f_out0)
plt.contourf(x0,y0,U0[:,:,0])
plt.show()
| apache-2.0 |
kevinyu98/spark | python/pyspark/sql/tests/test_dataframe.py | 2 | 39695 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail if when received unexpected type
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
from pyspark.sql import DataFrame
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_with_duplicated_column_names(self):
import numpy as np
sql = "select 1 v, 1 v"
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEquals(types.iloc[0], np.int32)
self.assertEquals(types.iloc[1], np.int32)
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_on_cross_join(self):
import numpy as np
sql = """
select t1.*, t2.* from (
select explode(sequence(1, 3)) v
) t1 left join (
select explode(sequence(1, 3)) v
) t2
"""
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.crossJoin.enabled": True,
"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEquals(types.iloc[0], np.int32)
self.assertEquals(types.iloc[1], np.int32)
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_empty_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_null_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(NULL AS TINYINT) AS tinyint,
CAST(NULL AS SMALLINT) AS smallint,
CAST(NULL AS INT) AS int,
CAST(NULL AS BIGINT) AS bigint,
CAST(NULL AS FLOAT) AS float,
CAST(NULL AS DOUBLE) AS double,
CAST(NULL AS BOOLEAN) AS boolean,
CAST(NULL AS STRING) AS string,
CAST(NULL AS TIMESTAMP) AS timestamp
"""
pdf = self.spark.sql(sql).toPandas()
types = pdf.dtypes
self.assertEqual(types[0], np.float64)
self.assertEqual(types[1], np.float64)
self.assertEqual(types[2], np.float64)
self.assertEqual(types[3], np.float64)
self.assertEqual(types[4], np.float32)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.object)
self.assertTrue(np.can_cast(np.datetime64, types[8]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_mixed_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
CAST(col9 AS TIMESTAMP) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.util.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
def test_same_semantics_error(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, "should be of DataFrame.*int"):
self.spark.range(10).sameSemantics(1)
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
nianxing/naowalkoptimiser | server/Localisation.theta.py | 2 | 12012 | """ An SIR Particle Filter based localisation system for tracking a robot with ambiguous bearing
Jason Kulk
"""
from NAO import NAO
import numpy, time
class Localisation:
THETA = 0
THETADOT = 1
STATE_LENGTH = 2
NUM_PARTICLES = 1000
def __init__(self):
""" """
self.reset = True
self.time = time.time()
self.previoustime = self.time
self.control = numpy.zeros(3) # the current control
self.previouscontrol = self.control # the previous control
self.measurement = numpy.zeros(Localisation.STATE_LENGTH) # the current measurement of the state
self.previousmeasurement = self.measurement # the previous measurement of the state
self.States = numpy.zeros((Localisation.NUM_PARTICLES, Localisation.STATE_LENGTH)) # the (states) particles
self.PreviousStates = self.States # the previous state of each particle (used for derivative calculations)
self.Weights = numpy.zeros(Localisation.NUM_PARTICLES) # the weights of each particle
self.State = self.States[0] # the estimate of the state
# Variables for the control model:
self.accelerationduration = [2.0, 2.0, 1.0] # the duration an acceleration is applied (s)
self.accelerationmagnitudes = [7.5, 5.0, 0.7] # the magnitude of the accelerations [forward, sideward, turn] (cm/s/s, rad/s)
self.accelerations = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the current acceleration (cm/s/s) for each particle
self.accelendtimes = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the times the accelerations will be set to zero given no change in control (s)
def update(self, control, nao):
""" """
self.time = time.time()
self.control = control
self.measurement = self.__naoToState(nao)
if self.reset:
self.__initParticles()
self.reset = False
else:
self.predict()
self.updateWeights()
self.resample()
self.estimateState()
self.previoustime = self.time
self.PreviousStates = self.States
def predict(self):
""" Updates each of the particles based on system and control model """
self.modelSystem()
self.modelControl()
def updateWeights(self):
""" """
# calculate the weights based on a measurement model
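        # Score each particle against the measured orientation with a two-mode Gaussian
        # (centred on theta and theta - pi): with an ambiguous bearing the measurement
        # only constrains the robot's axis, not which way it faces along it.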
self.Weights = self.__gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], 0.02) + self.__gauss(self.States[:,Localisation.THETA] - (self.measurement[Localisation.THETA] - numpy.pi), 0.02)
self.Weights *= self.__gauss(self.States[:,Localisation.THETADOT] - self.measurement[Localisation.THETADOT], 0.25)
# normalise the weights so that their sum is one
sum = numpy.sum(self.Weights)
if sum != 0:
self.Weights /= sum
def resample(self):
""" """
# An SIS filter resamples only when necessary
Neff = 1.0/numpy.sum(self.Weights**2)
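        # Neff (the effective sample size) shrinks towards 1 when a few particles carry
        # most of the weight, which is exactly when resampling is worthwhile.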
Ns = Localisation.NUM_PARTICLES
if Neff < 0.1*Ns:
NsInv = 1.0/Ns
c = numpy.cumsum(self.Weights)
u = NsInv*numpy.arange(Ns) + numpy.random.uniform(0, NsInv)
i = 0
#print "Pre resample:"
#print self.States[:,0:3]
for j in range(Ns):
while u[j] > c[i]:
i = i + 1
self.States[j] = self.States[i]
self.PreviousStates[j] = self.PreviousStates[i]
self.accelerations[j] = self.accelerations[i]
self.accelendtimes[j] = self.accelendtimes[i]
#print "Post resample:"
#print self.States[:,0:3]
self.Weights = NsInv*numpy.ones(Ns)
def modelSystem(self):
""" Updates each particle based on the system model """
dt = self.time - self.previoustime
sdthetadot = 0.25
self.States[:,Localisation.THETADOT] = self.PreviousStates[:,Localisation.THETADOT] + numpy.random.normal(0, sdthetadot, size=self.PreviousStates.shape[0])
self.States[:,Localisation.THETA] = self.PreviousStates[:,Localisation.THETA] + self.States[:,Localisation.THETADOT]*dt
self.States[:,Localisation.THETA] = numpy.arctan2(numpy.sin(self.States[:,Localisation.THETA]), numpy.cos(self.States[:,Localisation.THETA]))
def modelControl(self):
""" Updates each particle based on the control model """
        # my model for control is that a change in control will affect the state by
# introducing a constant acceleration over the next 1 second (2 steps)
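        # Concretely: a control change adds (delta / duration) to each particle's angular
        # acceleration, which is integrated into THETADOT below until accelendtimes expires.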
deltacontrol = self.control - self.previouscontrol
sdtheta = 0.2 # noise on estimate of acceleration magnitude (in rad/s/s)
# put a bit of spin on the robot if the desired bearing changes
if abs(deltacontrol[1]) > 0:
self.accelerations[:,2] += (1/self.accelerationduration[2])*deltacontrol[1] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
# put a bit of spin on the robot if the final orientation changes
if self.control[2] < 1000 and abs(self.control[0]) < 10 and abs(deltacontrol[2]) > 0:
if self.previouscontrol[2] > 1000:
self.accelerations[:,2] += (1/self.accelerationduration[2])*self.control[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
else:
self.accelerations[:,2] += (1/self.accelerationduration[2])*deltacontrol[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
self.accelerations = numpy.where(self.accelendtimes > self.time, self.accelerations, 0)
# calculate the controls contribution to the state velocity
self.States[:,Localisation.THETADOT] += self.accelerations[:,2]*(self.time - self.previoustime)
self.previouscontrol = self.control
def estimateState(self):
""" Updates the estimate of the state """
        best = numpy.argmax(self.Weights)  # the highest-weight particle is the best match
beststate = self.States[best,:]
#print "Best State:", beststate
cond = (numpy.sum(numpy.fabs(self.States - beststate), axis=1) < 10)
beststates = numpy.compress(cond, self.States, axis=0)
bestweights = numpy.compress(cond, self.Weights)
#print "States", self.States
#print "States within window:", cond
#print "States close to best", beststates
#print "Weights close to best", bestweights
#print "Product:", (bestweights*beststates.T).T
self.State = numpy.sum((bestweights*beststates.T).T, axis=0)
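        # Note: the assignment below overrides the windowed estimate above with the
        # plain weighted mean over all particles, which is what ends up being reported.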
self.State = numpy.sum((self.Weights*self.States.T).T, axis=0)
print "Estimate:", self.State
if numpy.isnan(self.State[0]):
print "FAIL"
self.__updateAttributesFromState()
def __initParticles(self):
""" Initialises self.Particles to contain Localisation.NUM_PARTICLES particles around the current measurement """
#print "Initialising Particles around", self.measurement
self.States += self.measurement
        # I know for certain that at the beginning the robot is not moving, so all of the velocities should be zero. The position, however, should get some noise
self.States[:,Localisation.THETA] += numpy.random.normal(0, 0.02, size=self.States.shape[0])
# now swap half of the orientations
self.States[:,Localisation.THETA] = numpy.where(numpy.random.uniform(0,1, size=self.States.shape[0]) < 0.5, self.States[:, Localisation.THETA], self.States[:, Localisation.THETA] - numpy.pi)
#print self.States
def __naoToState(self, nao):
state = numpy.zeros(Localisation.STATE_LENGTH)
if nao != None:
#state[Localisation.X] = nao.X
#state[Localisation.Y] = nao.Y
state[Localisation.THETA] = nao.Orientation
#state[Localisation.XDOT] = nao.VX
#state[Localisation.YDOT] = nao.VY
state[Localisation.THETADOT] = nao.VOrientation
#print nao.X
#self.AllX.append(nao.X)
#self.AllY.append(nao.Y)
#self.AllTheta.append(nao.Orientation)
#self.AllXdot.append(nao.VX)
#self.AllYdot.append(nao.VY)
#self.AllThetadot.append(nao.VOrientation)
#print "SDs: X", numpy.std(self.AllX), "Y", numpy.std(self.AllY), "Theta", numpy.std(self.AllTheta), "Xdot", numpy.std(self.AllXdot), "Ydot", numpy.std(self.AllYdot), "Thetadot", numpy.std(self.AllThetadot)
return state
def __updateAttributesFromState(self):
""" I have a bunch of convienent attributes for accessing the state. I need to keep them for backward compatiblity purposes. """
self.X = self.measurement[0]#self.State[Localisation.X]
self.Y = self.measurement[1]#self.State[Localisation.Y]
self.Theta = self.State[Localisation.THETA]
self.VX = 0#self.State[Localisation.XDOT]
self.VY = 0#self.State[Localisation.YDOT]
self.VTheta = self.State[Localisation.THETADOT]
self.V = 0#numpy.sqrt(self.VX**2 + self.VY**2)
def __gauss(self, x, sigma):
        return (1.0/(numpy.sqrt(2*numpy.pi)*sigma))*numpy.exp(-(x**2)/(2*sigma**2))
if __name__ == '__main__':
import matplotlib
matplotlib.use('WXAgg')
matplotlib.rcParams['toolbar'] = 'None'
import pylab, psyco, wx
psyco.full()
x = list()
y = list()
o = list()
localisation = Localisation()
loopcount = 0
control = numpy.zeros(3)
ax = pylab.subplot(111)
canvas = ax.figure.canvas
particleplot, = pylab.plot([0,0],[0,0], marker='o', color='k', linewidth=0, markersize=2, animated=True)
estimateplot, = pylab.plot([0,0],[0,0], marker='o', animated=True)
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 200)
canvas.draw()
canvas.gui_repaint()
def update_plot(*args):
""" hmm """
global control, loopcount, localisation
if update_plot.background is None:
update_plot.background = canvas.copy_from_bbox(ax.bbox)
starttime = time.time()
localisation.update(control, None)
#x.append(localisation.State[0])
#y.append(localisation.State[1])
#o.append(localisation.State[2])
loopcount += 1
if loopcount == 2:
print "Starting"
control = numpy.array([200,-0.5,0])
canvas.restore_region(update_plot.background)
estimateplot.set_data(x,y)
particleplot.set_data(100*numpy.cos(localisation.States[:,Localisation.THETA]), 100*numpy.sin(localisation.States[:,Localisation.THETA]))
ax.draw_artist(particleplot)
#ax.draw_artist(estimateplot)
canvas.blit(ax.bbox)
time.sleep(max(0,0.1 - (time.time() - starttime)))
wx.WakeUpIdle()
update_plot.background = None
wx.EVT_IDLE(wx.GetApp(), update_plot)
pylab.show()
| gpl-3.0 |
allrod5/extra-trees | benchmarks/classification/accuracy.py | 1 | 3820 | # Modified for MCZA015-13 class project by Rodrigo Martins de Oliveira
# License: BSD Style.
import matplotlib.pyplot as plt
import pandas
from sklearn import model_selection
from sklearn.ensemble import ExtraTreesClassifier as SKExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_breast_cancer, load_iris, load_wine
from extra_trees.ensemble.forest import ExtraTreesClassifier
# prepare models
classification_models = [
('Logistic', LogisticRegression()),
('Nearest Neighbors', KNeighborsClassifier()),
('SVM', SVC()),
('DecisionTree', DecisionTreeClassifier()),
('RandomForest', RandomForestClassifier()),
('ExtraTrees (SciKit)', SKExtraTreesClassifier()),
('ExtraTrees', ExtraTreesClassifier()),
]
seed = 7
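# Each dataset below follows the same recipe: 10-fold cross-validated accuracy per
# model, printed as mean (std), followed by a box plot of the per-fold scores.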
print("breast_cancer")
breast_cancer = load_breast_cancer()
X, y = breast_cancer.data, breast_cancer.target
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in classification_models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('breast_cancer')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
print("iris")
iris = load_iris()
X, y = iris.data, iris.target
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in classification_models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('iris')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
print("wine")
wine = load_wine()
X, y = wine.data, wine.target
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in classification_models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('wine')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
print("diabetes")
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in classification_models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
| mit |
sdiazpier/nest-simulator | pynest/examples/one_neuron.py | 14 | 3680 | # -*- coding: utf-8 -*-
#
# one_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
See Also
~~~~~~~~
:doc:`twoneurons`
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interference from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (neurons and devices) are created using ``Create``.
# We store the returned handles in variables for later reference.
# The ``Create`` function also allows you to create multiple nodes
# e.g. ``nest.Create('iaf_psc_alpha',5)``
# The default parameters of the model can also be configured using ``Create``
# by including a list of parameter dictionaries
# e.g. ``nest.Create("iaf_psc_alpha", params=[{'I_e': 376.0}])``.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, we set the external current of the neuron.
neuron.I_e = 376.0
###############################################################################
# Fourth, the neuron is connected to the voltmeter. The command
# ``Connect`` has different variants. Plain ``Connect`` just takes the
# handles of pre- and postsynaptic nodes and uses the default values
# for weight and delay. Note that the connection direction for the voltmeter is
# reversed compared to the spike recorder, because it observes the
# neuron instead of receiving events from it. Thus, ``Connect``
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
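# For comparison, a spike recorder sits on the receiving side of its connection,
# e.g. ``nest.Connect(neuron, spike_recorder)``.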
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time and display the plot using pyplot.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
greulist137/deep-learning-master | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
petosegan/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
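    # predict_log_proba can hit log(0) = -inf for classes with zero probability,
    # which is why this helper is decorated with @ignore_warnings.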
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
apache/spark | python/pyspark/sql/udf.py | 19 | 20044 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
import sys
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import StringType, DataType, StructType, _parse_datatype_string
from pyspark.sql.pandas.types import to_arrow_type
__all__ = ["UDFRegistration"]
def _wrap_function(sc, func, returnType):
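    # Pickle the (function, return type) pair and wrap it in a JVM PythonFunction
    # so it can be shipped to and executed by the executors.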
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
def _create_udf(f, returnType, evalType, name=None, deterministic=True):
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(
f, returnType=returnType, name=name, evalType=evalType, deterministic=deterministic)
return udf_obj._wrapped()
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
Notes
-----
The constructor of this class is not supposed to be directly called.
Use :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
to create this instance.
"""
def __init__(self, func,
returnType=StringType(),
name=None,
evalType=PythonEvalType.SQL_BATCHED_UDF,
deterministic=True):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func)))
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid return type: returnType should be DataType or str "
"but is {}".format(returnType))
if not isinstance(evalType, int):
raise TypeError(
"Invalid evaluation type: evalType should be an int but is {}".format(evalType))
self.func = func
self._returnType = returnType
# Stores UserDefinedPythonFunctions jobj, once initialized
self._returnType_placeholder = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, '__name__')
else func.__class__.__name__)
self.evalType = evalType
self.deterministic = deterministic
@property
def returnType(self):
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or \
self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with scalar Pandas UDFs: %s is "
"not supported" % str(self._returnType_placeholder))
elif self.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped map Pandas UDFs or "
"at groupby.applyInPandas: %s is not supported" % str(
self._returnType_placeholder))
else:
raise TypeError("Invalid return type for grouped map Pandas "
"UDFs or at groupby.applyInPandas: return type must be a "
"StructType.")
elif self.evalType == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in mapInPandas: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid return type in mapInPandas: "
"return type must be a StructType.")
elif self.evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in cogroup.applyInPandas: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid return type in cogroup.applyInPandas: "
"return type must be a StructType.")
elif self.evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
try:
# StructType is not yet allowed as a return type, explicitly check here to fail fast
if isinstance(self._returnType_placeholder, StructType):
raise TypeError
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped aggregate Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
return self._returnType_placeholder
@property
def _judf(self):
        # It is possible that concurrent access to a newly created UDF
# will initialize multiple UserDefinedPythonFunctions.
# This is unlikely, doesn't affect correctness,
# and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction(
self._name, wrapped_func, jdt, self.evalType, self.deterministic)
return judf
def __call__(self, *cols):
judf = self._judf
sc = SparkContext._active_spark_context
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
# This function is for improving the online help system in the interactive interpreter.
# For example, the built-in help / pydoc.help. It wraps the UDF with the docstring and
# argument annotation. (See: SPARK-19161)
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
        # It is possible for a callable instance without a __name__ and/or
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
wrapper._unwrapped = self
return wrapper
def asNondeterministic(self):
"""
Updates UserDefinedFunction to nondeterministic.
.. versionadded:: 2.3
"""
# Here, we explicitly clean the cache to create a JVM UDF instance
# with 'deterministic' updated. See SPARK-23233.
self._judf_placeholder = None
self.deterministic = False
return self
class UDFRegistration(object):
"""
Wrapper for user-defined function registration. This instance can be accessed by
:attr:`spark.udf` or :attr:`sqlContext.udf`.
.. versionadded:: 1.3.1
"""
def __init__(self, sparkSession):
self.sparkSession = sparkSession
def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
.. versionadded:: 1.3.1
Parameters
----------
name : str,
name of the user-defined function in SQL statements.
f : function, :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see the examples below.
Returns
-------
function
a user-defined function
Notes
-----
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
Examples
--------
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)='4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)='3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function (from Spark 2.3.0):
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf
>>> @pandas_udf("integer") # doctest: +SKIP
... def add_one(s: pd.Series) -> pd.Series:
... return s + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer") # doctest: +SKIP
... def sum_udf(v: pd.Series) -> int:
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
"""
        # This is to check whether the input function is a user-defined function or
        # a plain Python function.
if hasattr(f, 'asNondeterministic'):
if returnType is not None:
raise TypeError(
"Invalid return type: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType)
if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF]:
raise ValueError(
"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF, "
"SQL_SCALAR_PANDAS_ITER_UDF, SQL_GROUPED_AGG_PANDAS_UDF or "
"SQL_MAP_PANDAS_ITER_UDF.")
register_udf = _create_udf(
f.func, returnType=f.returnType, name=name,
evalType=f.evalType, deterministic=f.deterministic)._unwrapped
return_udf = f
else:
if returnType is None:
returnType = StringType()
return_udf = _create_udf(
f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF, name=name)
register_udf = return_udf._unwrapped
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
.. versionadded:: 2.3.0
Parameters
----------
name : str
name of the user-defined function
javaClassName : str
fully qualified name of java class
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength('test')").collect() # doctest: +SKIP
[Row(javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength2('test')").collect() # doctest: +SKIP
[Row(javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength3('test')").collect() # doctest: +SKIP
[Row(javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
.. versionadded:: 2.3.0
        Parameters
        ----------
        name : str
name of the user-defined aggregate function
javaClassName : str
fully qualified name of java class
Examples
--------
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
... # doctest: +SKIP
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> q = "SELECT name, javaUDAF(id) as avg from df group by name order by name desc"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(name='b', avg=102.0), Row(name='a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.udf
globs = pyspark.sql.udf.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.udf tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.udf, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/notebook/notebookapp.py | 2 | 49051 | # coding: utf-8
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, print_function
import base64
import binascii
import datetime
import errno
import importlib
import io
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import webbrowser
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 4.0
msg = "The Jupyter Notebook requires tornado >= 4.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (4,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
from notebook import (
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.sessions.sessionmanager import SessionManager
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler, IPythonHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel, NATIVE_KERNEL_NAME
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float,
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from .utils import url_path_join, check_pid, url_escape
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
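    For example (illustrative), list(random_ports(8888, 8)) could return 8888-8892
    followed by three ports drawn at random from [8872, 8904].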
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'notebook.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, log,
base_url, default_url, settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
ipython_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
sys_info = get_sys_info()
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=ipython_app.static_file_path,
static_custom_path=ipython_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=ipython_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=ipython_app.iopub_msg_rate_limit,
iopub_data_rate_limit=ipython_app.iopub_data_rate_limit,
rate_limit_window=ipython_app.rate_limit_window,
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=ipython_app.login_handler_class,
logout_handler_class=ipython_app.logout_handler_class,
password=ipython_app.password,
xsrf_cookies=True,
disable_check_xsrf=ipython_app.disable_check_xsrf,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# IPython stuff
jinja_template_vars=ipython_app.jinja_template_vars,
nbextensions_path=ipython_app.nbextensions_path,
websocket_url=ipython_app.websocket_url,
mathjax_url=ipython_app.mathjax_url,
config=ipython_app.config,
config_dir=ipython_app.config_dir,
jinja2_env=env,
terminals_available=False, # Set later if terminals are available
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
handlers.extend(load_handlers('tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('files.handlers'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('kernelspecs.handlers'))
handlers.extend(load_handlers('edit.handlers'))
handlers.extend(load_handlers('services.api.handlers'))
handlers.extend(load_handlers('services.config.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.contents.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
handlers.extend(load_handlers('services.kernelspecs.handlers'))
handlers.extend(load_handlers('services.security.handlers'))
# BEGIN HARDCODED WIDGETS HACK
# TODO: Remove on notebook 5.0
try:
import widgetsnbextension
        except ImportError:
try:
import ipywidgets as widgets
handlers.append(
(r"/nbextensions/widgets/(.*)", FileFindHandler, {
'path': widgets.find_static_assets(),
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
            except Exception:
app_log.warning('Widgets are unavailable. Please install widgetsnbextension or ipywidgets 4.0')
# END HARDCODED WIDGETS HACK
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
class NbserverListApp(JupyterApp):
version = __version__
description="List currently running notebook servers."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.runtime_dir):
if self.json:
print(json.dumps(serverinfo))
else:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
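    # Example output sketch (added; hypothetical values — the real URL, token
    # and directory depend on whatever server info files are found):
    #
    #   Currently running servers:
    #   http://localhost:8888/?token=abc123... :: /home/user/notebooks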
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
"DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = """
The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager,
ContentsManager, FileContentsManager, NotebookNotary,
KernelSpecManager,
]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
def _log_level_default(self):
return logging.INFO
def _log_datefmt_default(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
def _log_format_default(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
            help='Whether to use the minified JS file; mainly used during development to avoid JS recompilation',
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help="Set the Access-Control-Allow-Credentials: true header"
)
default_url = Unicode('/tree', config=True,
help="The default URL to redirect to from `/`"
)
ip = Unicode('localhost', config=True,
help="The IP address the notebook server will listen on."
)
def _ip_default(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warn("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
client_ca = Unicode(u'', config=True,
help="""The full path to a certificate authority certificate for SSL/TLS client authentication."""
)
cookie_secret_file = Unicode(config=True,
help="""The file where the cookie secret is stored."""
)
def _cookie_secret_file_default(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
def _cookie_secret_default(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
return f.read()
else:
secret = base64.encodestring(os.urandom(1024))
self._write_cookie_secret_file(secret)
return secret
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file)
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warn(
"Could not set permissions on %s",
self.cookie_secret_file
)
token = Unicode('<generated>', config=True,
help="""Token used for authenticating first-time connections to the server.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
"""
)
one_time_token = Unicode(
help="""One-time token used for opening a browser.
Once used, this token cannot be used again.
"""
)
_token_generated = True
def _token_default(self):
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
def _token_changed(self, name, old, new):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="DEPRECATED, use tornado_settings"
)
def _webapp_settings_changed(self, name, old, new):
self.log.warn("\n webapp_settings is deprecated, use tornado_settings.\n")
self.tornado_settings = new
tornado_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses.")
cookie_options = Dict(config=True,
help="Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details."
)
ssl_options = Dict(config=True,
help="""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details.""")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
jinja_template_vars = Dict(
config=True,
help="Extra variables to supply to jinja templates when rendering.",
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
def _enable_mathjax_changed(self, name, old, new):
"""set mathjax url to empty if mathjax is disabled"""
if not new:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_url = '/'+new
elif not new.endswith('/'):
self.base_url = new+'/'
base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
def _base_project_url_changed(self, name, old, new):
self.log.warn("base_project_url is deprecated, use base_url")
self.base_url = new
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the notebook's built-in static files."""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help="""Path to search for custom.js, css"""
)
def _static_custom_path_default(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates."""
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help="""extra paths to look for Javascript notebook extensions"""
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
def _mathjax_url_default(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
def _mathjax_url_changed(self, name, old, new):
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
contents_manager_class = Type(
default_value=FileContentsManager,
klass=ContentsManager,
config=True,
        help='The contents manager class to use.'
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
config=True,
help='The kernel manager class to use.'
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help='The session manager class to use.'
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help='The config manager class to use'
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
        The API of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help='The login handler class to use.',
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help='The logout handler class to use.',
)
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
pylab = Unicode('disabled', config=True,
help="""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
"""
)
def _pylab_changed(self, name, old, new):
"""when --pylab is specified, display a warning and exit"""
if new != 'warn':
backend = ' %s' % new
else:
backend = ''
self.log.error("Support for specifying --pylab on the command line has been removed.")
self.log.error(
"Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help="The directory to use for notebooks and kernels."
)
def _notebook_dir_default(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
def _notebook_dir_validate(self, value, trait):
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError("No such notebook dir: %r" % value)
return value
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
# setting App.notebook_dir implies setting notebook and kernel dirs as well
self.config.FileContentsManager.root_dir = new
self.config.MappingKernelManager.root_dir = new
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=("DEPRECATED use the nbserver_extensions dict instead")
)
def _server_extensions_changed(self, name, old, new):
self.log.warning("server_extensions is deprecated, use nbserver_extensions")
self.server_extensions = new
nbserver_extensions = Dict({}, config=True,
help=("Dict of Python modules to load as notebook server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order.")
)
reraise_server_extension_failures = Bool(
False,
config=True,
help="Reraise exceptions encountered loading server extensions?",
)
iopub_msg_rate_limit = Float(0, config=True, help="""(msg/sec)
Maximum rate at which messages can be sent on iopub before they are
limited.""")
iopub_data_rate_limit = Float(0, config=True, help="""(bytes/sec)
        Maximum rate at which message data can be sent on iopub before it is
        limited.""")
rate_limit_window = Float(1.0, config=True, help="""(sec) Time window used to
check the message and data rate limits.""")
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_configurables(self):
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
config_dir=os.path.join(self.config_dir, 'nbconfig'),
)
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a log
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['token'] = self.token
if (self.open_browser or self.file_to_run) and not self.password:
self.one_time_token = binascii.hexlify(os.urandom(24)).decode('ascii')
self.tornado_settings['one_time_token'] = self.one_time_token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# Disable SSLv3 by default, since its use is discouraged.
ssl_options.setdefault('ssl_version', ssl.PROTOCOL_TLSv1)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
url = self._url(ip)
if self.token:
# Don't log full token if it came from config
token = self.token if self._token_generated else '...'
url = url_concat(url, {'token': token})
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_terminals(self):
try:
from .terminal import initialize
initialize(self.web_app, self.notebook_dir, self.connection_url)
self.web_app.settings['terminals_available'] = True
except ImportError as e:
log = self.log.debug if sys.platform == 'win32' else self.log.warn
log("Terminals not available (error was %s)", e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.current().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.current().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.current().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
            # Don't override the disable state of the extension if it already exists
# in the new traitlet
if not modulename in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
for modulename in sorted(self.nbserver_extensions):
if self.nbserver_extensions[modulename]:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning("Error loading server extension %s", modulename,
exc_info=True)
def init_mime_overrides(self):
# On some Windows machines, an application has registered an incorrect
# mimetype for CSS in the registry. Tornado uses this when serving
# .css files, causing browsers to reject the stylesheet. We know the
# mimetype always needs to be text/css, so we override it here.
mimetypes.add_type('text/css', '.css')
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
def cleanup_kernels(self):
"""Shutdown all kernels.
        The kernels will shut down themselves when this process no longer exists,
        but an explicit shutdown allows the KernelManagers to clean up the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The Jupyter Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
super(NotebookApp, self).start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical("%s does not exist" % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
else:
# default_url contains base_url, but so does connection_url
uri = self.default_url[len(self.base_url):]
if self.one_time_token:
uri = url_concat(uri, {'token': self.one_time_token})
if browser:
b = lambda : browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
self.log.critical('\n'.join([
'\n',
'Copy/paste this URL into your browser when you connect for the first time,',
'to login with a token:',
' %s' % url_concat(self.connection_url, {'token': self.token}),
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.remove_server_info_file()
self.cleanup_kernels()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in the security directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file in os.listdir(runtime_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(runtime_dir, file), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file))
except OSError:
pass # TODO: This should warn or log or something
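# Usage sketch (added for illustration; not part of the original module):
#
#     >>> for info in list_running_servers():
#     ...     print(info['url'])
#
# Each yielded dict contains the keys written by NotebookApp.server_info(),
# e.g. 'url', 'port', 'token', 'notebook_dir' and 'pid'.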
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
| mit |
ccarouge/cwsl-mas | cwsl/vt_modules/vt_plot_gridded_seas.py | 4 | 5636 | """
Creates a gridded seasonal plot
This module wraps the plot_gridded_seas.sh script found in git repository cwsl-ctools.
Part of the CWSLab VisTrails plugin.
Authors: Craig Heady, [email protected]
Tim Bedin, [email protected]
Tim Erwin, [email protected]
Copyright 2015 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from vistrails.core.modules import vistrails_module
from vistrails.core.modules.basic_modules import String, List
from cwsl.configuration import configuration
from cwsl.core.process_unit import ProcessUnit
from cwsl.core.constraint import Constraint
from cwsl.core.pattern_generator import PatternGenerator
class PlotGriddedSeas(vistrails_module.Module):
"""
    Plots a gridded field (season).
Required inputs:
Other inputs: infile (from dataset connector)
title (model name from dataset connector)
Requires: python, cdat, cct, matplotlib, basemap
    Supported regions:
'AUS_PCCSP'
'PACIFIC'
'PACCSAP'
'VANUATU'
'AUSTRALIA'
'AUSTRALIA_NZ'
'AUSTRALIA_EXT'
'AUSTRALIA_EXT_NZ'
'SE_AUSTRALIA'
'QLD'
'SEQLD'
'BRISBANE'
'WORLD'
'WORLD360'
'STH_SULAWESI'
'MAKASSAR'
'INDONESIA'
'EAST_INDONESIA'
'NTB'
'ZOOM'
'AUREL_AND_LAWSON'
'MONSOONAL_NORTH'
'WET_NORTH'
'RANGELANDS'
'MURRAY_BASIN'
'EAST_COAST'
'SOUTHERN_SLOPES'
'S&SW_FLATLANDS'
'BBBASIN'
'INDIA'
'SOUTHASIA'
'australia_NRM-eval'
'australia_NRM-eval-psl'
"""
# Define the module ports.
_input_ports = [('in_dataset', 'csiro.au.cwsl:VtDataSet',
{'labels': str(['Input Dataset'])}),
('variable', String, {"defaults": str([''])}),
('plot_type', String, {"defaults": str(['pcolor'])}),
('title', String, {"defaults": str([''])}),
('region', String, {"defaults": str(['WORLD360'])}),
('colormap', String, {"defaults": str([''])}),
('ticks', String, {"defaults": str([''])}),
('conv_units', String, {"defaults": str(['False'])}),
]
_output_ports = [('out_dataset', 'csiro.au.cwsl:VtDataSet',)]
_execution_options = {'required_modules': ['cdo','python/2.7.5','python-cdat-lite/6.0rc2-py2.7.5',
'cct/trunk','python/2.7.5-matplotlib',
'python-basemap/1.0.7-py2.7']
}
def __init__(self):
super(PlotGriddedSeas, self).__init__()
self.command = '${CWSL_CTOOLS}/visualisation/plot_gridded_seas.py'
# Get the output pattern using the PatternGenerator object.
# Gets the user infomation / authoritative path etc from the
# user configuration.
self.out_pattern = PatternGenerator('user', 'default').pattern
def compute(self):
# Required input
in_dataset = self.getInputFromPort("in_dataset")
variable = self.getInputFromPort("variable")
#self.positional_args=[(variable_name, 0, 'raw')]
plot_type = self.getInputFromPort("plot_type")
#self.positional_args=[(plot_type, 1, 'raw')]
title = self.getInputFromPort("title")
#self.positional_args=[(title, 2, 'raw')]
region = self.getInputFromPort("region")
#self.positional_args=[(plot_type, 3, 'raw')]
colormap = self.getInputFromPort("colormap")
#self.positional_args=[(colormap, 4, 'raw')]
ticks = self.getInputFromPort("ticks")
#self.positional_args=[(plot_type, 5, 'raw')]
conv_units = self.getInputFromPort("conv_units")
#self.positional_args=[(conv_units, 6, 'raw')]
cons_for_output = set([Constraint('suffix', ['png'])])
run_opts = ''
if variable:
run_opts = run_opts + ' --variable %s' %variable
if plot_type:
run_opts = run_opts + ' --plot_type %s' %plot_type
if title:
run_opts = run_opts + ' --title %s' %title
if region:
run_opts = run_opts + ' --region %s' %region
if colormap:
run_opts = run_opts + ' --colourmap %s' %colormap
if ticks:
run_opts = run_opts + " --ticks '%s'" %ticks
if conv_units:
run_opts = run_opts + " --conv_units '%s'" %conv_units
# Execute plotting process.
this_process = ProcessUnit([in_dataset],
self.out_pattern,
self.command,
cons_for_output,
execution_options=self._execution_options,
#positional_args=self.positional_args,
kw_string=run_opts)
#kw_string="--title '${model}_${experiment}'")
try:
process_output = this_process.execute(simulate=configuration.simulate_execution)
except Exception as e:
raise vistrails_module.ModuleError(self, repr(e))
self.setResult('out_dataset', process_output)
| apache-2.0 |
imaculate/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12303 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
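    # Added note: assuming the standard Bernoulli RBM energy
    #   E(v, h) = -b.v - c.h - h.W.v,
    # summing the hidden units out analytically gives the free energy used above,
    #   F(v) = -b.v - sum_j log(1 + exp(c_j + W_j.v)),
    # where log(1 + exp(x)) is evaluated as np.logaddexp(0, x) for numerical
    # stability.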
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
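    # Added sketch of the gradient step above (standard PCD notation; angle
    # brackets denote batch averages, with the 1/batch_size factor folded
    # into lr):
    #   W     += lr * (<v h>_data - <v h>_model)
    #   c_hid += lr * (<h>_data   - <h>_model)
    #   b_vis += lr * (<v>_data   - <v>_model)
    # and the persistent chain h_samples_ is refreshed by sampling binary
    # hidden states from h_neg.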
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
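    # Added note: in formula form the proxy returned above is
    #   log PL(v) ~ n_features * log(sigmoid(F(v_corrupted) - F(v))),
    # i.e. a stochastic pseudo-likelihood estimate based on a single randomly
    # flipped feature per sample.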
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
LohithBlaze/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice of
relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
holsety/tushare | tushare/datayes/fund.py | 17 | 7543 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) fund data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Fund():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Fund(self, etfLof='', listStatusCd='', secID='', ticker='', category='',
operationMode='', field=''):
"""
            Fetch basic fund profile information, including fund name, trading code, structured (graded) fund status, category, principal-protection status, listing information, related institutions and investment description.
            Historical data since 2005; updated irregularly.
"""
code, result = self.client.getData(vs.FUND%(etfLof, listStatusCd, secID,
ticker, category, operationMode, field))
return _ret_data(code, result)
def FundNav(self, dataDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
            Fetch the historical net asset value (NAV) series of a fund (money-market and short-term wealth-management bond funds excluded), including unit NAV, accumulated NAV and adjusted NAV.
            Historical data since 2005; updated daily. If no date is given, roughly the most recent year of data is returned.
"""
code, result = self.client.getData(vs.FUNDNAV%(dataDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundDivm(self, dataDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
            Fetch the historical returns of a money-market fund or a short-term wealth-management bond fund, including income per 10,000 units and the 7-day annualized yield.
            Historical data since 2005; updated daily. If no date is given, roughly the most recent year of data is returned.
"""
code, result = self.client.getData(vs.FUNDDIVM%(dataDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundDiv(self, secID='', ticker='', adjustedType='', beginDate='', endDate='', field=''):
"""
            Fetch fund NAV adjustment information, covering both dividends and share splits. Dividend records include the per-unit dividend, ex-dividend date and reinvestment date; split records include the share conversion ratio and the split date.
            Historical data since 2005; updated irregularly.
"""
code, result = self.client.getData(vs.FUNDDIV%(secID, ticker, adjustedType,
beginDate, endDate, field))
return _ret_data(code, result)
def FundAssets(self, reportDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
            Fetch the asset allocation that funds disclose periodically, including total assets, net assets, and the market value and weight of the four asset classes (equity, fixed income, cash, other) within total assets.
            Historical data since 2005; updated quarterly. Two query modes are supported:
            1) pass one or more secID/ticker values together with beginDate and endDate to get the asset allocation of the specified funds over a period;
            2) pass only reportDate to get the asset allocation of all funds on that date.
"""
code, result = self.client.getData(vs.FUNDASSETS%(reportDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundHoldings(self, reportDate='', secID='', ticker='', beginDate='', endDate='',
secType='', field=''):
"""
        Get the holdings that funds disclose periodically, with detailed positions in stocks, bonds and funds.
        Historical data since 2005 is covered and updated quarterly. Two query modes are supported:
        1) pass one or more secID/ticker values plus beginDate and endDate to get the holdings of the given funds over a period;
        2) pass only reportDate to get the holdings of all funds on that date.
"""
code, result = self.client.getData(vs.FUNDHOLDINGS%(reportDate, secID, ticker,
beginDate, endDate, secType, field))
return _ret_data(code, result)
def FundETFPRList(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the basic creation/redemption list of an ETF for a trading day, including the underlying index name,
        the previous day's cash difference, NAV per minimum creation/redemption unit and unit NAV, plus the current
        day's estimated cash difference, minimum creation/redemption unit, cash substitution ratio cap, whether
        creation/redemption is allowed and whether the IOPV is published.
        Historical data since 2005 is covered and updated daily. If no dates are given, the last two days are returned by default.
"""
code, result = self.client.getData(vs.FUNDETFPRLIST%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundETFCons(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the constituent list of the index tracked by an ETF for each trading day, including constituent code,
        short name, share quantity, cash substitution premium ratio and fixed substitution amount.
        Historical data since 2005 is covered and updated daily. If no dates are given, the last two days are returned by default.
"""
code, result = self.client.getData(vs.FUNDETFCONS%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundRating(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get Shanghai Securities fund rating data. Historical data since 2010 is covered and updated monthly.
        If no dates are given, roughly the last year of data is returned by default.
"""
code, result = self.client.getData(vs.FUNDRATING%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundSharesChg(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get share-change information for exchange-traded funds, including fund name, trading code, exchange,
        cut-off date and outstanding shares. Historical data since 2005 is covered and updated daily.
"""
code, result = self.client.getData(vs.FUNDSHARESCHG%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundLeverageInfo(self, exchangeCDLeverage='', secID='', ticker='', field=''):
"""
        Get basic information on structured (graded) funds, including parent and child fund names, trading codes,
        split ratio and conversion details.
"""
code, result = self.client.getData(vs.FUNDLEVERAGEINFO%(exchangeCDLeverage, secID, ticker, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
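# Hedged usage sketch (added for illustration; not part of the upstream module).
# It assumes a valid DataYes token has already been stored via
# tushare.util.upass.set_token(); the ticker and the YYYYMMDD date strings
# below are placeholders, not values confirmed by the original code.
if __name__ == '__main__':
    fund_api = Fund()
    # roughly one month of NAV history for a single illustrative fund code
    nav = fund_api.FundNav(ticker='184688', beginDate='20150101', endDate='20150201')
    if nav is not None:
        print(nav.head())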
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
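# Illustrative note (added): brute_force_neighbors is the plain-numpy reference
# that every BallTree result below is checked against. A minimal hypothetical
# call would look like:
#   X = np.random.random((40, 3)); Y = np.random.random((5, 3))
#   dist, ind = brute_force_neighbors(X, Y, k=3, metric='euclidean')
# where dist[i] holds the 3 smallest distances from Y[i] to rows of X and
# ind[i] the corresponding row indices into X.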
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
roman-dvorak/SolarForecast | tools/myFunct.py | 1 | 11601 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
import os
import time
import datetime
from datetime import datetime
import ftplib
import ConfigParser
import operator
from array import array
import subprocess
import Image
import math
import statistics
from pylab import plot, ylim, xlim, show, xlabel, ylabel, grid
from numpy import linspace, loadtxt, ones, convolve
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import pyfits
import numpy as np
import numpy as numpy
import sunpy
from sunpy import lightcurve
from sunpy.net import hek
import pandas
import pandas as pd
import scipy
import scipy as sp
from scipy.interpolate import interp1d
from scipy import stats
arrA = []
arrB = []
arrAvg=[]
newname = ""
Flat = [0.0]
Dark = [0.0]
arr = [0.0]
#path = "/home/roman/Dokumenty/Projects/Astronomy/Spectrum/2014/140611/spectra/flat/"
path="/home/roman/Dokumenty/Projects/Astronomy/Spectrum/2014/140611/spectra/AR12087/0843/"
#path="/media/roman/eHDD/Dokumenty/Projects/Astronomy/Spectrum/140611/spectra/AR12087/0807"
path_sj="/home/roman/Dokumenty/Projects/Astronomy/Spectrum/2014/140611/sj/new/"
path_hessi="/home/roman/Dokumenty/Projects/Astronomy/Spectrum/data/"
dirList = os.listdir(path)
dirList.sort()
dirListSJ = os.listdir(path_sj)
dirListSJ.sort()
print dirList
counter = 0
def corespond(path_sj, newname):
#
    # builds the slit-jaw image file name corresponding to 'newname'
    #
    # in path_sj - path to all slit-jaw images
    # in newname - name of the txt file the image name is derived from; example "20140603_033722_40ms_00010.txt"
    #
    # out name_sj - file name of the matching slit-jaw image
#
name_sj = ""
dirList = os.listdir(path_sj)
dirList.sort()
print newname[9:11]+"-"+newname[11:13]+"-"+newname[13:15]
for sjfile in dirList:
        if sjfile[0:len(sjfile)-4] == newname[9:11]+"-"+newname[11:13]+"-"+newname[13:15]:
            print "matching slit-jaw image found in path_sj for this newname"
name_sj = sjfile
name_sj = newname[9:11]+"-"+newname[11:13]+"-"+newname[13:15]+".bmp"
return name_sj
# newname - 20140603_033722_40ms_00010.txt -
# sj file - Image0004 07-32-38.bmp -
def compose(path, newname, type, path_sj, compose):
# type = 0 - only graph
# type = 1 - text data
# type = 2 - spectrum and slitjaw
# type = 3 -
#if type is not 0 or 1 or 2 or 3:
# type = 0
try:
        if type == 0:
pass
        elif type == 1:
print "!!!!!!!!!!!!compose type 1"
img=Image.open("plot/"+newname+".jpg",'r')
img_w,img_h=img.size
background = Image.new('RGBA', (img_w,img_h), (255, 255, 255, 1))
#bg_w,bg_h=background.size
#offset=((bg_w-img_w)/2,(bg_h-img_h)/2)
background.paste(img)
background.save("plot/"+newname+".jpg")
        elif type == 2:
print "!!!!!!!!!!!!compose type 2"
img=Image.open("plot/"+newname+".jpg",'r')
img_w,img_h=img.size
background = Image.new('RGBA', (img_w,img_h*2), (200, 200, 200, 1))
try:
print path_sj+compose
img2=Image.open(path_sj+compose,'r')
img2_w,img2_h=img2.size
background = Image.new('RGBA', (img_w,img_h+img2_h), (200, 200, 200, 1))
offset=(0,0)
background.paste(img2,offset)
except:
print "error"
bg_w,bg_h=background.size
offset=(0,img2_h)
background.paste(img,offset)
background.save("plot/"+newname+".jpg")
except:
print "Error: compose neznamy typ"
def monthToNum(date):
return{
'Jan' : '01',
'Feb' : '02',
'Mar' : '03',
'Apr' : '04',
'May' : '05',
'Jun' : '06',
'Jul' : '07',
'Aug' : '08',
'Sep' : '09',
'Oct' : '10',
'Nov' : '11',
'Dec' : '12'
}[date]
def dayToNum(date):
return{
'Mon' : '01',
'Tue' : '02',
'Wed' : '03',
'Thu' : '04',
'Fri' : '05',
'Sat' : '06',
'Sun' : '07',
}[date]
def parser(arrA, arrB, path, soubor):
global newname
#
    # builds the two arrays 'arrA' and 'arrB' from a spectrograph data file
    #
    # in 'arrA' - array with the wavelengths
    # in 'arrB' - array with the measured values
    # in 'path' - path to the location of 'soubor'
    # in 'soubor'- name of the data file
    #
    # glob 'newname' - generated file name; example "20140603_033722_40ms_00010.txt"
#
f=open(path+soubor)
lines=f.readlines()
    #minutes = int(lines[2][17:19])*60 + int(lines[2][20:22]) + 4*60 + 30 # spectra time offset from UTC
seconds=(int(float(lines[2][17:19]))+4)*60*60 + (int(float(lines[2][20:22]))+30)*60 +int(float(lines[2][23:25]))
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
newname = "" + lines[2][30:34] + monthToNum(lines[2][10:13]) + lines[2][14:16] + "_" + str('%02d'% h) + str('%02d' % m) + str('%02d' % s) + "_" + soubor
    # ^ year ^ month number ^ day of month ^ hour ^ minute ^ second ^ original name
for i in range(18,3665):
data = lines[i]
arrA.append(float( data[:data.find("\t")] )*10)
arrB.append(float( data[data.find("\t")+1:] ))
arrA[0]=0
arrB[0]=0
return str(newname)#, lines[2], str('%02d'% h)
def erse (arrA, arrB):
#
    # clears the arrays 'arrA' and 'arrB' in place
    del arrA[:]
    del arrB[:]
def average (arrA,arrB, size):
for element in range(1,len(arrA)):
if element > size and element < (len(arrA)-size):
arrB[element] = (arrB[element]+arrB[element-size]+arrB[element+size])/(1+2*size)
def plot (arrA, arrB, newname, xmin, xmax):
#
    # plots 'arrB' against 'arrA' with the title 'newname' and saves the figure to a file
    #
    # in 'arrA' - array with the X axis values
    # in 'arrB' - array with the Y axis values
    # in 'newname' - plot title and output file name; example "20140603_033722_40ms_00010.txt"
    # in 'xmin' - minimum value on the x axis
    # in 'xmax' - maximum value on the x axis
#
print "elements: ", len(arrA)
f, ax = plt.subplots()
plt.title(r' spectrum '+ newname)
f.set_figwidth(11.7)
f.set_figheight(4.1)
plt.grid()
ax.set_xlim([xmin,xmax])
ax.set_ylim([-1,2])
ax.set_xlabel('Wavelength (Angstroms)')
ax.set_ylabel('value [relative]')
ax.set_xticks(np.arange(xmin,xmax,(xmax-xmin)/10))
ax.plot(arrA, arrB, '-b', linewidth=0.8)
plt.savefig("./plot/"+newname+".jpg", dpi=200)
plt.close()
def timelaps(folder, type):
subprocess.call(["ffmpeg", "-r 25", "-pattern_type glob", "-i 'plot/*.jpg'", "-c:v copy output.avi"])
#ffmpeg -r 25 -pattern_type glob -i 'plot/*.jpg' -c:v copy output.avi
def takeClosest(myList, myNumber):
#
    # finds the measured wavelength closest to the requested value
    #
    # in 'myList' - list of all wavelengths
    # in 'myNumber'- wavelength to look for
    #
    # out 'ext' - index into 'myList'
    # out 'closest'- value closest to 'myNumber'
#
if myNumber < 34946 or myNumber > 48548:
raise Exception("Hledana vlnova delka je mimo rozsah." + "Pouzijte rozsah 3494.6 az 4854.8.")
vedle = 100
closest = 100
for i in xrange(1, len(myList)):
if abs(myList[i]*10 - myNumber) < vedle:
vedle = abs(myList[i]*10 - myNumber)
closest = myList[i]*10
ext=i
return int(ext), closest
parser(arr, Flat, "/home/roman/Dokumenty/Projects/Astronomy/Spectrum/scripts/", "FlatSpectrum.TXT")
parser(arr, Dark, "/home/roman/Dokumenty/Projects/Astronomy/Spectrum/scripts/", 'DarkSpectrum.TXT')
def smooth(inarr, lenght):
#
    # rolling (moving) average over a window of 'lenght' samples
#
inarr=np.array(inarr)
outarr=np.array(inarr)
for i in xrange(int(lenght/2),int(inarr.shape[0]-lenght)):
suma=0
for xa in xrange(int(-lenght/2),int(lenght/2)):
#print i, xa, inarr[i]
suma=suma+inarr[i+xa]
outarr[i]=suma/lenght
return outarr
def smerodatna_odchylka(data, min=0, max=0, plot=True):
#
    # computes the standard deviation. If min and max are not set, it is computed
    # from the whole array; otherwise only values between min and max are used.
    #
    # in 'data' - array with the data
    # in 'min' - minimum value of the array to consider
    # in 'max' - maximum value of the array to consider
    # in 'plot' - whether to draw a histogram plot
    #
    # out - the standard deviation
#
data = np.array(data)
if min == 0 and max == 0:
average = np.mean(data)
median = np.median(data)
standardDeviation=np.std(data)
kurtosis = stats.kurtosis(data)
skewness = stats.skew(data)
else:
crop = np.array([])
for x in data:
if min < x < max:
crop=np.append(crop,x)
average = np.mean(crop)
# modus = stats.mode(crop)
# modus = statistics.mode(crop) !!!!!
median = np.median(crop)
standardDeviation=np.std(crop)
kurtosis = stats.kurtosis(crop)
skewness = stats.skew(crop)
if plot:
plt.figure()
plt.axvspan(float(min), float(max), alpha=0.3, color='k')
plt.axvspan(average-standardDeviation, average+standardDeviation, alpha=0.4, color='b')
plt.axvspan(average+standardDeviation, average+standardDeviation+standardDeviation, alpha=0.4, color='r')
plt.axvspan(average-standardDeviation, average-standardDeviation-standardDeviation, alpha=0.4, color='r')
plt.axvline(x=median, linewidth=2, color='r')
plt.axvline(x=average, linewidth=2, color='g')
#plt.axvline(x=modus[0], linewidth=2, color='b')
plt.hist(data, 1.0+3.3*math.log(np.shape(data)[0]), facecolor='green', alpha=0.75)
plt.text(average, 10, "std: "+ str(standardDeviation),
bbox={'facecolor':'green', 'alpha':0.75, 'pad':10})
plt.show(block=False)
print "___________________________________________________________"
print "výběr hodnot od ", float(min), " po ", float(max)
print "průměr: ", average
print "median: ", median
print "smerodatn odchylka je: ", standardDeviation
print "spicatost: ", kurtosis
print "sikmost: ", skewness
return standardDeviation
def plotMenu():
def hlavicka(msg=" "):
print msg
print "--------------------"
print "Menu"
print "____________________"
print " "
print "Ukoncit:", "\t\t\t" , "Enter"
print "smerodatna odchylka", "\t\t" , "1" +" + "+"Enter"
print "Najit SJ", "\t\t" , "2+" " + "+"Enter"
print ""
input=None
hlavicka()
while input != "" and input != "1" and input != "2" and input != "" and input != "":
input = raw_input("zadejte a potvrdte hodnotu: ")
if input != "" and input != "1" and input != "2" and input != "" and input != "":
hlavicka("zadal jste spatnou hodnotu")
else:
print "zadal jste:", input
if input == "":
plt.close()
raise SystemExit("Aplikace se ukoncuje")
return input
def findSJ(var1, path_sj):
#dt=datetime.strptime(var1,'%H-%M-%S')
if os.path.isfile(path_sj+var1+".bmp"):
img=mpimg.imread(path_sj+var1+".bmp")
imgplot = plt.imshow(img)
show(block=False)
else:
print "Nic nenalezeno"
#print(dt)
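# Hedged usage sketch (added for illustration; not part of the original script).
# It exercises only the helper functions above on synthetic data; the
# module-level file loading higher up still expects the author's local paths.
if __name__ == '__main__':
    demo = np.random.normal(0.0, 0.02, 500)
    smoothed = smooth(demo, 10)                    # 10-sample rolling average
    sigma = smerodatna_odchylka(demo, plot=False)  # standard deviation of the whole array
    print "demo sigma:", sigma, "smoothed mean:", np.mean(smoothed)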
| gpl-3.0 |
saifrahmed/bokeh | scripts/interactive_tester.py | 43 | 9271 | from __future__ import print_function
import argparse
import importlib
import os
from shutil import rmtree
from six.moves import input
import sys
import textwrap
import time
import json
# TODO:
# catch and log exceptions in examples files that fail to open
DIRECTORIES = {
'file' : '../../examples/plotting/file',
'notebook': '../../examples/plotting/notebook',
'server' : '../../examples/plotting/server',
'ggplot' : '../../examples/compat/ggplot',
'glyphs' : '../../examples/glyphs',
'mpl' : '../../examples/compat/mpl',
'pandas' : '../../examples/compat/pandas',
'seaborn' : '../../examples/compat/seaborn',
'charts' : '../../examples/charts',
}
DEFAULT_TEST_FILES = [
'../../examples/plotting/file/stocks.py',
'../../examples/plotting/file/glucose.py',
'../../examples/compat/ggplot/density.py',
'../../examples/plotting/server/stocks.py',
'../../examples/plotting/server/glucose.py',
'../../examples/plotting/notebook/candlestick.ipynb',
'../../examples/plotting/notebook/glucose.ipynb',
'../../examples/compat/seaborn/violin.py',
'../../examples/charts/boxplot.py',
]
SESSION_FILE = os.path.abspath("INTERACTIVE_TESTER_SESSION.json")
def get_parser():
"""Create the parser that will be used to add arguments to the script.
"""
parser = argparse.ArgumentParser(description=textwrap.dedent("""
Tests a selection of .py or .ipynb bokeh example files.
The --location option allows you to select a specific examples subdirectory to test all files in,
ignoring __init__.py
Location arguments can be any valid path to a folder with the examples, like:
    -l /path/to/my/examples
    or you can choose any of the pre-built keywords that point to the related examples:
- file
- notebook
- server
- ggplot
- glyphs
- mpl
- pandas
- seaborn
"""), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--no-log', action='store_true', dest='nolog', default=False,
help="don't save a log of any errors discovered")
parser.add_argument('-l', '--location', action='store', default=False,
help="example directory in which you wish to test")
parser.add_argument('--reuse-session', action='store_true', dest='reuseSession', default=False,
help="do not clean last session log and start from where you left")
parser.add_argument('--notebook-options', action='store', dest='notebookOptions', default="",
help="options to be forwarded to ipython notebook to customize it's behaviour")
return parser
def depend_check(dependency):
"""
Make sure a given dependency is installed
"""
try:
importlib.import_module(dependency)
found = True
except ImportError as e:
print("%s\nPlease use conda or pip to install the necessary dependency." % (e))
found = False
return found
def save_session(session):
"""
Save the session object to the SESSION_FILE
Args:
session(dict): dict with all the example files and results of each run
"""
with open(SESSION_FILE, 'w') as res_file:
json.dump(session, res_file)
def get_session():
"""
Return last stored session
"""
try:
with open(SESSION_FILE, 'r') as res_file:
return json.load(res_file)
except IOError:
return {}
def clean_session():
"""
Removes previous session file
"""
if os.path.exists(SESSION_FILE):
os.remove(SESSION_FILE)
def main(testing_ground=None, notebook_options=""):
"""
Collect and run .py or .ipynb examples from a set list or given examples directory, ignoring __init__.py
User input is collected to determine a properly or improperly displayed page
"""
# Create a testing directory if one does not exist, then cd into it
testing_directory = 'tmp_test'
if not os.path.exists(testing_directory):
os.mkdir(testing_directory)
os.chdir(testing_directory)
if testing_ground:
log_name = results.location
TestFiles = [
fileName for fileName in os.listdir('%s/.' % testing_ground)
if fileName.endswith(('.py', '.ipynb')) and fileName != '__init__.py'
]
else:
log_name = "fast"
TestFiles = DEFAULT_TEST_FILES
Log = []
lastSession = get_session()
for index, fileName in enumerate(TestFiles):
if testing_ground:
fileName = "%s/%s" % (testing_ground, fileName)
try:
if not fileName in lastSession:
lastSession[fileName] = "TESTING..."
save_session(lastSession)
command = get_cmd(fileName, notebook_options)
opener(fileName, command)
if results.nolog:
# Don't display 'next file' message after opening final file in a dir
if index != len(TestFiles)-1:
input("\nPress enter to open next file ")
else:
ErrorReport = test_status()
if ErrorReport:
Log.append("\n\n%s: \n %s" % (fileName, ErrorReport))
lastSession[fileName] = ErrorReport
save_session(lastSession)
else:
prevRes = lastSession[fileName]
if prevRes == "TESTING...":
print("RESULT OF %s LAST RUN NOT REGISTERED!!" % fileName)
ErrorReport = test_status()
lastSession[fileName] = ErrorReport
save_session(lastSession)
else:
print("%s detected in last session: SKIPPING" % fileName)
except (KeyboardInterrupt, EOFError):
break
# exit the testing directory and delete it
os.chdir('../')
rmtree(testing_directory)
if Log:
logger(Log, log_name)
def get_cmd(some_file, notebook_options=""):
"""Determines how to open a file depending
on whether it is a .py or a .ipynb file
"""
if some_file.endswith('.py'):
command = "python"
elif some_file.endswith('.ipynb'):
command = "ipython notebook %s" % notebook_options
return command
def opener(some_file, command):
"""Print to screen what file is being opened and then open the file using
the command method provided.
"""
print("\nOpening %s\n" % some_file.strip('../'))
os.system("%s %s" % (command, some_file))
def test_status():
"""Collect user input to determine if a file displayed correctly or incorrectly.
In the case of incorrectly displayed plots, an 'ErrorReport' string is returned.
"""
status = input("Did the plot(s) display correctly? (y/n) ")
while not status.startswith(('y', 'n')):
print("")
status = input("Unexpected answer. Please type y or n. ")
if status.startswith('n'):
ErrorReport = input("Please describe the problem: ")
return ErrorReport
def logger(error_array, name):
"""
Log errors by appending to a .txt file. The name and directory the file is saved into
is provided by the name and log_dir args.
"""
logfile = "%s_examples_testlog.txt" % name
if os.path.exists(logfile):
os.remove(logfile)
with open(logfile, 'a') as f:
print("")
print("\nWriting error log to %s" % logfile)
for error in error_array:
f.write("%s\n" % error)
if __name__ == '__main__':
if not depend_check('bokeh'):
sys.exit(1)
parser = get_parser()
results = parser.parse_args()
if results.location:
if results.location and results.location in DIRECTORIES:
target = results.location
if target in ['ggplot', 'pandas', 'seaborn', 'charts']:
if not depend_check(target):
sys.exit(1)
test_dir = DIRECTORIES[target]
elif os.path.exists(results.location):
# in case target is not one of the recognized keys and is a
# valid path we can run the examples in that folder
test_dir = results.location
print("Running examples in custom location:", test_dir)
else:
print("Test location '%s' not recognized.\nPlease type 'python interactive_tester.py -h' for a list of valid test directories."
% results.location)
sys.exit(1)
else:
test_dir = None
if results.location == 'server' or test_dir is None:
print("Server examples require bokeh-server. Make sure you've typed 'bokeh-server' in another terminal tab.")
time.sleep(5)
if not results.reuseSession:
print("cleaning previous session file...",)
clean_session()
print("OK")
main(test_dir, notebook_options=results.notebookOptions)
| bsd-3-clause |
schmidi093/RTVC | Design/attitude_control/kalman_filter_python/kalman_attitude.py | 1 | 2563 | import numpy as np
import matplotlib.pyplot as plt
# state transformation matrix
A = np.matrix([[1, 0.01, 0],[0, 1, 0.01], [0, 0, 1]])
# command matrix
B = np.matrix([[0.0],[0.0],[3.0*0.04/8.1e-4]])
# B = np.matrix([[0.0],[0.0],[0.0]])
# initial state (known or estimated)
X = np.matrix([[0.0],[0.0],[0.0]])
# initial error covariance
P = np.matrix(np.identity(3))
# system error covariance
Q = 0.01 * np.matrix(np.identity(3))
# measurement to state translation
H = np.matrix([0.0,1.0,0.0])
# measurement noise covariance
R = np.matrix([0.02])
X_prio_hist = []
P_prio_hist = []
u_hist = []
K_hist = []
X_hist = []
P_hist = []
Z=np.loadtxt("gammadot_noise.csv")
U=np.loadtxt("command_exact.csv")
gamma=np.loadtxt("gamma_exact.csv")
gammadot=np.loadtxt("gammadot_exact.csv")
e_int = 0
k_P = -5.0
k_I = -5.0
k_D = -1.0
"""# updating K and P
# determine command from PID
for z in Z:
e_int = e_int + 0.01 * X[0,0]
u = k_P * X[0,0] + k_I * e_int + k_D * X[1,0]
# take command from file
# for z, u in zip(Z, U):
# prediction
# X_prio = A*X + B*u
X_prio = A*X
P_prio = A*P*A.T + Q
# correction
K = P_prio*H.T*np.linalg.inv(H*P_prio*H.T+R)
X = X_prio + K*(z-H*X_prio)
P = (np.matrix(np.identity(3)) - K*H)*P_prio
# save history
u_hist.append(u)
X_prio_hist.append(X_prio)
P_prio_hist.append(P_prio)
K_hist.append(K)
X_hist.append(X)
P_hist.append(P)"""
# # converged K and P from last run
K = np.matrix([[0.00733],
[0.267],
[0.258]])
# P = np.matrix([[ 0.04871656, 0.08012494, 0.04486424],
# [ 0.08012494, 1.88353956, 1.08584885],
# [ 0.04486424, 1.08584885, 1.78593403]])
for z in Z:
# prediction
X_prio = A*X
# correction
X = X_prio + K*(np.matrix(z).T-H*X_prio)
# save history
X_prio_hist.append(X_prio)
X_hist.append(X)
print("Final Kalman gain matrix K")
print(K)
print("Final error covariance matrix P")
print(P)
gamma_hist = np.array(X_hist)[:,0,0]
gammadot_hist = np.array(X_hist)[:,1,0]
# f, ax = plt.subplots(2, sharex=True)
f, ax = plt.subplots(3, sharex=True)
ax[0].set_ylim([-0.012,0.032])
ax[1].set_ylim([-0.001,0.012])
ax[2].set_ylim([-0.09,0.005])
ax[0].set_ylabel("gamma'")
ax[1].set_ylabel("gamma")
ax[2].set_ylabel("u")
ax[0].plot(Z, 'b-')
ax[0].plot(gammadot_hist, 'r-')
ax[0].plot(gammadot, 'g-')
ax[1].plot(gamma, 'g-')
ax[1].plot(gamma_hist, 'r-')
ax[2].plot(U, 'g-')
ax[2].plot(u_hist, 'r-')
plt.savefig("kalman_attitude_Bdirect.png")
plt.show() | mit |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/estimators/estimator.py | 7 | 55607 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
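# Illustrative note (added; shapes and estimator choice are hypothetical): the
# two helpers above turn raw input into dense real-valued feature columns, e.g.
#   x = np.random.rand(100, 4).astype(np.float32)
#   feature_columns = infer_real_valued_columns_from_input(x)
#   # -> four real-valued columns that can seed a canned estimator such as
#   #    tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)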
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
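# Illustrative note (added): _model_fn_args is how the framework discovers which
# optional arguments a user-supplied model_fn accepts. For a hypothetical
#   def my_model_fn(features, labels, mode, params): ...
# it returns ('features', 'labels', 'mode', 'params'), so `mode`, `params` and
# `config` are only passed to model_fns that actually declare them.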
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
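# Illustrative note (added; the metric name and prediction key are hypothetical):
# with MetricSpec objects the `metrics` argument above typically looks like
#   metrics = {'accuracy': metric_spec.MetricSpec(
#       metric_fn=metrics_lib.streaming_accuracy, prediction_key='classes')}
# and _make_metrics_ops wires predictions['classes'] plus the labels tensor into
# streaming_accuracy, returning {'accuracy': (value_op, update_op)}.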
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == "global_step":
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float, np.float32, np.int64, np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overriden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
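  # Illustrative note (added; the input_fn names are placeholders): a typical
  # call pattern on a concrete subclass is
  #   est.fit(input_fn=train_input_fn, steps=1000)
  #   metrics = est.evaluate(input_fn=eval_input_fn, steps=1)
  #   predictions = list(est.predict(input_fn=predict_input_fn))
  # where each input_fn returns a (features, labels) tuple of tensors.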
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overriden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
            prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            Estimators to be configured from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
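  # Illustrative sketch, not part of the original module: a minimal
  # `model_fn` matching the `(features, labels, mode)` signature described
  # above might look roughly like the following, assuming a single numeric
  # feature named 'x' and the layer/optimizer helpers named below:
  #
  #   def my_model_fn(features, labels, mode):
  #     predictions = tf.contrib.layers.fully_connected(features['x'], 1)
  #     loss = None
  #     train_op = None
  #     if mode != model_fn_lib.ModeKeys.INFER:
  #       loss = tf.losses.mean_squared_error(labels, predictions)
  #     if mode == model_fn_lib.ModeKeys.TRAIN:
  #       train_op = tf.contrib.layers.optimize_loss(
  #           loss, tf.contrib.framework.get_global_step(),
  #           learning_rate=0.1, optimizer='SGD')
  #     return model_fn_lib.ModelFnOps(
  #         mode=mode, predictions=predictions, loss=loss, train_op=train_op)
  #
  # The feature name 'x' and the specific layer/optimizer calls are
  # assumptions chosen only for illustration.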
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
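# Illustrative usage sketch, not part of the original module: given a fitted
# `estimator` and a `serving_input_fn` that returns an `InputFnOps`, exporting
# a SavedModel might look like:
#
#   export_dir = estimator.export_savedmodel(
#       export_dir_base='/tmp/my_model_exports',
#       serving_input_fn=serving_input_fn,
#       assets_extra={'vocab.txt': '/path/to/vocab.txt'})
#
# The paths above are placeholders; the call returns a timestamped
# subdirectory of `export_dir_base` containing the exported graph.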
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
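# Illustrative usage sketch, not part of the original module: SKCompat wraps
# an Estimator so plain numpy arrays can be passed directly, e.g.
#
#   est = SKCompat(Estimator(model_fn=my_model_fn, model_dir='/tmp/model'))
#   est.fit(x_train, y_train, steps=1000)
#   scores = est.score(x_test, y_test)
#   preds = est.predict(x_test)
#
# `my_model_fn`, the data arrays and the model_dir are assumptions made only
# for this example.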
| mit |
MartinSavc/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
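# A hedged alternative for the XXX note above: newer SciPy versions could
# build the random sparse matrix directly, e.g.
#   X = sp.rand(n_samples, n_features, density=0.2, format='csr',
#               random_state=42)
# (the density value here is an arbitrary illustration, not a recommendation).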
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
ccarocean/python-contours | setup.py | 1 | 1718 | # -* coding: utf-8 -*-
# pylint: disable=invalid-name
"""Build, test, and install contours."""
import re
from setuptools import setup
version = re.search(
r"^__version__\s*=\s*'(.*)'",
open('contours/__init__.py').read(),
re.M).group(1)
with open('README.rst', 'r') as f:
long_desc = f.read()
setup(
name='contours',
version=version,
description='Contour calculation with Matplotlib.',
author='Michael R. Shannon',
author_email='[email protected]',
license='MIT',
url='https://github.com/ccarocean/python-contours',
download_url=('https://github.com/ccarocean/python-contours/'
'archive/master.zip'),
long_description=long_desc,
packages=['contours'],
platforms=['any'],
keywords=['math', 'plotting', 'matplotlib'],
install_requires=[
'numpy>=1.4.0',
'matplotlib>=1.5.0',
'future'],
extras_require={
'shapely': ['shapely>=1.2.10']
},
test_suite='tests',
tests_require=[
'coverage',
'testfixtures'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
zip_safe=True
)
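# Illustrative usage note, not part of the original script: with this
# configuration the package would typically be built and installed with
# commands such as
#   python setup.py sdist bdist_wheel
#   pip install .
# and the declared test suite could be run with
#   python setup.py test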
| mit |
rsignell-usgs/PySeidon | pyseidon/stationClass/plotsStation.py | 2 | 5584 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn
from windrose import WindroseAxes
from interpolation_utils import *
from miscellaneous import depth_at_FVCOM_element as depth_at_ind
class PlotsStation:
"""
Description:
-----------
'Plots' subset of Station class gathers plotting functions
"""
def __init__(self, variable, grid, debug):
self._debug = debug
self._var = variable
self._grid = grid
#Back pointer
grid = self._grid
#self._grid._ax = grid._ax
def rose_diagram(self, direction, norm):
"""
Plot rose diagram
Inputs:
------
- direction = 1D array
- norm = 1D array
"""
        #Conversion
#TR: not quite sure here, seems to change from location to location
# express principal axis in compass
direction = np.mod(90.0 - direction, 360.0)
#Create new figure
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)#, axisbg='w')
fig.add_axes(ax)
#Rose
ax.bar(direction, norm , normed=True, opening=0.8, edgecolor='white')
#adjust legend
l = ax.legend(shadow=True, bbox_to_anchor=[-0.1, 0], loc='lower left')
plt.setp(l.get_texts(), fontsize=10)
plt.xlabel('Rose diagram in % of occurrences - Colormap of norms')
plt.show()
def plot_xy(self, x, y, xerror=[], yerror=[], title=' ', xLabel=' ', yLabel=' '):
"""
Simple X vs Y plot
Inputs:
------
- x = 1D array
- y = 1D array
Keywords:
--------
- xerror = error on 'x', 1D array
- yerror = error on 'y', 1D array
- title = plot title, string
- xLabel = title of the x-axis, string
- yLabel = title of the y-axis, string
"""
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
self._fig = plt.plot(x, y, label=title)
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
plt.ylabel(yLabel)
plt.xlabel(xLabel)
if not yerror==[]:
#plt.errorbar(x, y, yerr=yerror, fmt='o', ecolor='k')
plt.fill_between(x, y-yerror, y+yerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
if not xerror==[]:
#plt.errorbar(x, y, xerr=xerror, fmt='o', ecolor='k')
plt.fill_betweenx(y, x-xerror, x+xerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
plt.show()
def Histogram(self, y, title=' ', xLabel=' ', yLabel=' '):
"""
Histogram plot
Inputs:
------
- bins = list of bin edges
- y = 1D array
Keywords:
--------
- title = plot title, string
- xLabel = title of the x-axis, string
- yLabel = title of the y-axis, string
"""
fig = plt.figure(figsize=(18,10))
        density, bins = np.histogram(y, bins=50, density=True)
        unity_density = density / density.sum()
        widths = bins[1:] - bins[:-1]
        # To plot correct percentages in the y axis
        plt.bar(bins[:-1], unity_density, width=widths)
formatter = ticker.FuncFormatter(lambda v, pos: str(v * 100))
plt.gca().yaxis.set_major_formatter(formatter)
plt.ylabel(yLabel)
plt.xlabel(xLabel)
plt.show()
def add_points(self, x, y, label=' ', color='black'):
"""
Add scattered points (x,y) on current figure,
where x and y are 1D arrays of the same lengths.
Inputs:
------
- x = float number
        - y = float number
Keywords:
--------
- Label = a string
- Color = a string, 'red', 'green', etc. or gray shades like '0.5'
"""
plt.scatter(x, y, s=100, color=color)
#TR : annotate does not work on my machine !?
plt.annotate(label, xy=(x, y), xycoords='data', xytext=(-20, 20),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->", shrinkA=0))
def rose_diagram(self, direction, norm):
"""
Plot rose diagram
Inputs:
------
- direction = 1D array
- norm = 1D array
"""
        #Conversion
#TR: not quite sure here, seems to change from location to location
# express principal axis in compass
direction = np.mod(90.0 - direction, 360.0)
#Create new figure
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)#, axisbg='w')
fig.add_axes(ax)
#Rose
ax.bar(direction, norm , normed=True, opening=0.8, edgecolor='white')
#adjust legend
l = ax.legend(shadow=True, bbox_to_anchor=[-0.1, 0], loc='lower left')
plt.setp(l.get_texts(), fontsize=10)
plt.xlabel('Rose diagram in % of occurrences - Colormap of norms')
plt.show()
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 |
schets/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
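# Illustrative sketch, not part of the original module: the persistence
# helpers imported above are typically used as a pickle replacement, e.g.
#
#   from sklearn.externals.joblib import dump, load
#   import numpy as np
#   big = {'weights': np.random.rand(1000, 1000)}
#   dump(big, '/tmp/big.joblib')          # write to disk
#   restored = load('/tmp/big.joblib')    # read it back
#
# The file path and array shape are placeholders chosen for illustration.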
| bsd-3-clause |
huzq/scikit-learn | sklearn/decomposition/_truncated_svd.py | 2 | 8362 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import _deprecate_positional_args
from ..utils.validation import check_is_fitted
__all__ = ["TruncatedSVD"]
class TruncatedSVD(TransformerMixin, BaseEstimator):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). Contrary to PCA, this
estimator does not center the data before computing the singular value
decomposition. This means it can work with sparse matrices
efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In
that context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or
`X.T * X`, whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK. The
default is larger than the default in
:func:`~sklearn.utils.extmath.randomized_svd` to handle sparse
matrices that may have large slowly decaying spectrum.
random_state : int, RandomState instance, default=None
Used during randomized svd. Pass an int for reproducible results across
multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ : array, shape (n_components,)
The variance of the training samples transformed by a projection to
each component.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from scipy.sparse import random as sparse_random
>>> X = sparse_random(100, 100, density=0.01, format='csr',
... random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X)
TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> print(svd.explained_variance_ratio_)
[0.0646... 0.0633... 0.0639... 0.0535... 0.0406...]
>>> print(svd.explained_variance_ratio_.sum())
0.286...
>>> print(svd.singular_values_)
[1.553... 1.512... 1.510... 1.370... 1.199...]
See also
--------
PCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) https://arxiv.org/pdf/0909.4061.pdf
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
@_deprecate_positional_args
def __init__(self, n_components=2, *, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = self._validate_data(X, accept_sparse=['csr', 'csc'],
ensure_min_features=2)
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = U * Sigma
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self)
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
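# Illustrative sketch, not part of the original module: for the LSA use case
# described in the class docstring, TruncatedSVD is commonly chained after a
# tf-idf vectorizer and followed by normalization, e.g.
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   from sklearn.pipeline import make_pipeline
#   from sklearn.preprocessing import Normalizer
#
#   lsa = make_pipeline(TfidfVectorizer(), TruncatedSVD(n_components=100),
#                       Normalizer(copy=False))
#   # X_lsa = lsa.fit_transform(list_of_documents)
#
# The choice of 100 components follows the recommendation above; the final
# Normalizer step is illustrative, not prescriptive.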
| bsd-3-clause |
jaidevd/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
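# Illustrative sketch, not part of the original example: the generative
# process described in the module docstring corresponds roughly to
#
#   rng = np.random.RandomState(0)
#   n = rng.poisson(n_labels)        # resampled until an acceptable count
#   classes = rng.choice(n_classes, size=n, replace=False, p=theta)
#   k = 0
#   while k == 0:                    # reject zero-length documents
#       k = rng.poisson(length)
#   words = rng.multinomial(k, theta_of_chosen_classes)
#
# n_labels, n_classes, length, theta and theta_of_chosen_classes are
# hypothetical parameters; make_multilabel_classification handles all of this
# internally.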
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
wdwvt1/scikit-bio | skbio/stats/distance/tests/test_permanova.py | 13 | 4940 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
class TestPERMANOVA(TestCase):
"""All results were verified with R (vegan::adonis)."""
def setUp(self):
# Distance matrices with and without ties in the ranks, with 2 groups
# of equal size.
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
# Test with 3 groups of unequal size.
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
# Equivalent grouping but with different labels -- groups should be
# assigned different integer labels but results should be the same.
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
# Expected series index is the same across all tests.
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
# Stricter series equality testing than the default.
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_call_ties(self):
# Ensure we get the same results if we rerun the method using the same
# inputs. Also ensure we get the same results if we run the method
# using a grouping vector or a data frame with equivalent groupings.
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999],
name='PERMANOVA results')
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_call_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_call_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0],
name='PERMANOVA results')
obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_call_unequal_group_sizes(self):
exp = pd.Series(
index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/dtypes/base.py | 2 | 5447 | """Extend pandas with custom array types"""
import numpy as np
from pandas import compat
from pandas.errors import AbstractMethodError
class _DtypeOpsMixin(object):
    # Not all of pandas' extension dtypes are compatible with
# the new ExtensionArray interface. This means PandasExtensionDtype
# can't subclass ExtensionDtype yet, as is_extension_array_dtype would
# incorrectly say that these types are extension types.
#
# In the interim, we put methods that are shared between the two base
# classes ExtensionDtype and PandasExtensionDtype here. Both those base
# classes will inherit from this Mixin. Once everything is compatible, this
# class's methods can be moved to ExtensionDtype and removed.
# na_value is the default NA value to use for this type. This is used in
# e.g. ExtensionArray.take. This should be the user-facing "boxed" version
    # of the NA value, not the physical NA value for storage.
# e.g. for JSONArray, this is an empty dictionary.
na_value = np.nan
def __eq__(self, other):
"""Check whether 'other' is equal to self.
By default, 'other' is considered equal if
* it's a string matching 'self.name'.
* it's an instance of this type.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, compat.string_types):
return other == self.name
elif isinstance(other, type(self)):
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def names(self):
# type: () -> Optional[List[str]]
"""Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def is_dtype(cls, dtype):
"""Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
is_dtype : bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, 'dtype', dtype)
if isinstance(dtype, np.dtype):
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
class ExtensionDtype(_DtypeOpsMixin):
"""A custom data type, to be paired with an ExtensionArray.
.. versionadded:: 0.23.0
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
* construct_from_string
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
def __str__(self):
return self.name
@property
def type(self):
# type: () -> type
"""The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``.
"""
raise AbstractMethodError(self)
@property
def kind(self):
        # type: () -> str
"""A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return 'O'
@property
def name(self):
# type: () -> str
"""A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@classmethod
def construct_from_string(cls, string):
"""Attempt to construct this type from a string.
Parameters
----------
string : str
Returns
-------
self : instance of 'cls'
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
If the extension dtype can be constructed without any arguments,
the following may be an adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string)
... if string == cls.name:
... return cls()
... else:
... raise TypeError("Cannot construct a '{}' from "
... "'{}'".format(cls, string))
"""
raise AbstractMethodError(cls)
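# Illustrative sketch, not part of the original module: a minimal concrete
# dtype satisfying the abstract interface above might look like
#
#   class MyDtype(ExtensionDtype):
#       name = 'mydtype'
#       type = object
#
#       @classmethod
#       def construct_from_string(cls, string):
#           if string == cls.name:
#               return cls()
#           raise TypeError("Cannot construct a 'MyDtype' from "
#                           "'{}'".format(string))
#
# 'MyDtype' and its name are hypothetical; a paired ExtensionArray would
# still be needed for a working extension type.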
| mit |
PatrickOReilly/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 110 | 5681 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
OXPHOS/shogun | examples/undocumented/python/graphical/preprocessor_kpca_graphical.py | 11 | 1884 | from numpy import *
import matplotlib.pyplot as p
import os, sys, inspect
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools'))
if path not in sys.path:
sys.path.insert(1, path)
del path
from generate_circle_data import circle_data
cir=circle_data()
number_of_points_for_circle1=42
number_of_points_for_circle2=122
row_vector=2
data=cir.generate_data(number_of_points_for_circle1,number_of_points_for_circle2,row_vector)
d=zeros((row_vector,number_of_points_for_circle1))
d2=zeros((row_vector,number_of_points_for_circle2))
d=[data[i][0:number_of_points_for_circle1] for i in range(0,row_vector)]
d2=[data[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(0,row_vector)]
p.plot(d[1][:],d[0][:],'x',d2[1][:],d2[0][:],'o')
p.title('input data')
p.show()
parameter_list = [[data,0.01,1.0], [data,0.05,2.0]]
def preprocessor_kernelpca_modular(data, threshold, width):
from shogun import RealFeatures
from shogun import KernelPCA
from shogun import GaussianKernel
features = RealFeatures(data)
kernel=GaussianKernel(features,features,width)
preprocessor=KernelPCA(kernel)
preprocessor.init(features)
preprocessor.set_target_dim(2)
#X=preprocessor.get_transformation_matrix()
X2=preprocessor.apply_to_feature_matrix(features)
lx0=len(X2)
modified_d1=zeros((lx0,number_of_points_for_circle1))
modified_d2=zeros((lx0,number_of_points_for_circle2))
modified_d1=[X2[i][0:number_of_points_for_circle1] for i in range(lx0)]
modified_d2=[X2[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(lx0)]
p.plot(modified_d1[0][:],modified_d1[1][:],'o',modified_d2[0][:],modified_d2[1][:],'x')
p.title('final data')
p.show()
return features
if __name__=='__main__':
print('KernelPCA')
preprocessor_kernelpca_modular(*parameter_list[0])
| gpl-3.0 |
mne-tools/mne-python | mne/externals/tqdm/_tqdm/std.py | 14 | 55471 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, _text_width, \
Comparable, RE_ANSI, _is_ascii, FormatReplace, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
    BLANK = "  "
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
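# Hedged usage sketch (added) for the format specifiers documented above, kept
# as doctest-style comments so import-time behaviour is unchanged:
# >>> '{0:10a}'.format(Bar(0.5))    # 10-wide ASCII bar, half full
# '#####     '
# >>> '{0:-5a}'.format(Bar(1.0))    # width = default_len - 5 = 5
# '#####'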
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
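    # Hedged doctest-style illustration (added; outputs traced by hand):
    # >>> tqdm.format_sizeof(123456)
    # '123k'
    # >>> tqdm.format_sizeof(1234567, suffix='B', divisor=1024)
    # '1.18MB'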
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
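    # Hedged doctest-style illustration (added):
    # >>> tqdm.format_interval(125)
    # '02:05'
    # >>> tqdm.format_interval(3661)
    # '1:01:01'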
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
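    # Hedged doctest-style illustration (added): the shorter of the '.3g'
    # rendering and plain str() wins.
    # >>> tqdm.format_num(3.14159265)
    # '3.14'
    # >>> tqdm.format_num(1e-07)
    # '1e-7'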
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
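    # Hedged worked example (added): with alpha=0.3, a new value x=10 against a
    # previous EMA mu=20 gives 0.3*10 + 0.7*20 = 17.0; with mu=None the new
    # value is returned unchanged.
    # >>> tqdm.ema(10, 20, alpha=0.3)
    # 17.0
    # >>> tqdm.ema(10)
    # 10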
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
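    # Hedged usage sketch (added): the returned callable rewrites a single line
    # in place, padding with spaces to erase any leftover characters.
    # >>> sp = tqdm.status_printer(sys.stderr)
    # >>> sp("processing item 1/100")
    # >>> sp("done")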
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.BLANK)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
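    # Hedged doctest-style illustration (added); output traced by hand for the
    # default bar format with ``ascii=True``:
    # >>> tqdm.format_meter(n=50, total=100, elapsed=10, ascii=True)
    # ' 50%|#####     | 50/100 [00:10<00:10,  5.00it/s]'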
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| bsd-3-clause |
sangwook236/sangwook-library | python/test/machine_learning/pytorch/mixup/utils.py | 2 | 3718 | import os, sys, time
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# REF [site] >> https://github.com/vikasverma1077/manifold_mixup/blob/master/supervised/utils.py
def to_one_hot(inp, num_classes, device):
y_onehot = torch.FloatTensor(inp.size(0), num_classes)
y_onehot.zero_()
y_onehot.scatter_(1, inp.unsqueeze(1).data.cpu(), 1)
return Variable(y_onehot.to(device), requires_grad=False)
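# Hedged illustration (added): for inp = torch.tensor([0, 2]) and num_classes = 3
# this returns (roughly) a float tensor [[1, 0, 0], [0, 0, 1]] on the requested
# device, wrapped in a (deprecated but harmless) Variable.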
def mixup_process(out, target_reweighted, lam):
indices = np.random.permutation(out.size(0))
out = out*lam + out[indices]*(1-lam)
target_shuffled_onehot = target_reweighted[indices]
target_reweighted = target_reweighted * lam + target_shuffled_onehot * (1 - lam)
#t1 = target.data.cpu().numpy()
#t2 = target[indices].data.cpu().numpy()
#print (np.sum(t1==t2))
return out, target_reweighted
def mixup_data(x, y, alpha, device):
'''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
else:
lam = 1.
batch_size = x.size()[0]
index = torch.randperm(batch_size).to(device)
mixed_x = lam * x + (1 - lam) * x[index,:]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
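def mixup_criterion_sketch(criterion, pred, y_a, y_b, lam):
    """Hedged sketch added for illustration (not from the original repository):
    the pair of targets returned by mixup_data() is usually consumed by
    interpolating the loss with the same lambda that mixed the inputs."""
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
# Typical (hedged) usage, assuming a CPU device and CIFAR-like shapes:
# >>> x, y = torch.randn(4, 3, 32, 32), torch.tensor([0, 1, 2, 3])
# >>> mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, device='cpu')
# >>> # loss = mixup_criterion_sketch(criterion, model(mixed_x), y_a, y_b, lam)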
def get_lambda(alpha=1.0):
'''Return lambda'''
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
else:
lam = 1.
return lam
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def apply(self, img, device):
"""
Args:
            img (Tensor): Batch of images of size (N, C, H, W).
        Returns:
            Tensor: Batch with n_holes square patches of side `length` zeroed out.
"""
h = img.size(2)
w = img.size(3)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = int(np.clip(y - self.length / 2, 0, h))
y2 = int(np.clip(y + self.length / 2, 0, h))
x1 = int(np.clip(x - self.length / 2, 0, w))
x2 = int(np.clip(x + self.length / 2, 0, w))
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img).to(device)
img = img * mask
return img
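def example_cutout_usage():
    """Hedged usage sketch added for illustration: drives Cutout.apply with a
    4-D batch (N, C, H, W), matching the dimension indexing used in apply()."""
    device = 'cpu'
    imgs = torch.randn(4, 3, 32, 32)
    cutout = Cutout(n_holes=1, length=8)
    # One random 8x8 region (the same location across the batch) is zeroed.
    return cutout.apply(imgs, device)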
def create_val_folder(data_set_path):
"""
Used for Tiny-imagenet dataset
Copied from https://github.com/soumendukrg/BME595_DeepLearning/blob/master/Homework-06/train.py
This method is responsible for separating validation images into separate sub folders,
so that test and val data can be read by the pytorch dataloaders
"""
path = os.path.join(data_set_path, 'val/images') # path where validation data is present now
filename = os.path.join(data_set_path, 'val/val_annotations.txt') # file where image2class mapping is present
fp = open(filename, "r") # open file in read mode
data = fp.readlines() # read line by line
# Create a dictionary with image names as key and corresponding classes as values
val_img_dict = {}
for line in data:
words = line.split("\t")
val_img_dict[words[0]] = words[1]
fp.close()
# Create folder if not present, and move image into proper folder
for img, folder in val_img_dict.items():
newpath = (os.path.join(path, folder))
if not os.path.exists(newpath): # check if folder exists
os.makedirs(newpath)
if os.path.exists(os.path.join(path, img)): # Check if image exists in default directory
os.rename(os.path.join(path, img), os.path.join(newpath, img))
if __name__ == "__main__":
create_val_folder('data/tiny-imagenet-200') # Call method to create validation image folders
| gpl-2.0 |
Curious72/sympy | sympy/plotting/plot.py | 55 | 64797 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essentials. For all the fancy stuff, use the
backend directly. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc., that may
be useful if you wish to use another plotting library.
If you need publication-ready graphs and this module is not enough
for you, just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
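# The docstring of ``Plot`` above describes global options and per-series
# aesthetics in prose.  The sketch below is a hypothetical helper (not part of
# the module's API and never called on import) showing one plausible way to
# build a Plot directly from data series; it assumes sympy is importable and
# uses only names defined in this file.
def _plot_usage_sketch():
    from sympy import Symbol
    x = Symbol('x')
    # Any BaseSeries subclass can be passed to the Plot constructor.
    s1 = LineOver1DRangeSeries(x**2, (x, -5, 5))
    s2 = LineOver1DRangeSeries(x, (x, -3, 3), line_color=lambda t: t)
    p = Plot(s1, title='quadratic vs identity', xlabel='x', legend=True)
    p.append(s2)        # add one more series after construction
    p.xlim = (-5, 5)    # global options can also be set as attributes
    return p            # p.show() would render it with the default backend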
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
    (eg TextBackend supports only LineOver1DRangeSeries).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
    not obliged to use that api (eg. The LineOver1DRangeSeries belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
    # - get_meshes returning mesh_x (1D array), mesh_y (1D array),
    #   mesh_z (2D np.array)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
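# A hedged sketch of what ``get_color_array`` computes when ``line_color`` is a
# one-argument callable: the aesthetic is evaluated at the centers of
# consecutive samples, yielding one value per segment.  The helper below is
# hypothetical (not used by the module) and assumes only that numpy is
# importable.
def _line_color_aesthetic_sketch():
    import numpy as np
    x = np.linspace(0.0, 1.0, 5)           # sample points of a line
    color = lambda t: t ** 2                # the aesthetic, a function of x
    centers = (x[:-1] + x[1:]) / 2.0        # same idea as centers_of_segments
    return np.vectorize(color)(centers)     # one color value per segment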
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
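# A hedged comparison of the two sampling modes implemented above, written as a
# hypothetical helper that is never called on import; it assumes sympy is
# importable and uses only classes defined in this file.
def _adaptive_vs_uniform_sketch():
    from sympy import Symbol, sin
    x = Symbol('x')
    adaptive = LineOver1DRangeSeries(sin(1 / x), (x, 0.1, 1))
    uniform = LineOver1DRangeSeries(sin(1 / x), (x, 0.1, 1),
                                    adaptive=False, nb_of_points=100)
    # Adaptive sampling keeps subdividing until three consecutive points are
    # almost collinear (see ``flat`` below), so it places more segments where
    # the curve bends sharply; the uniform grid always yields a fixed count.
    return len(adaptive.get_segments()), len(uniform.get_segments())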
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1],
                                   point_a, point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
        self.label = "(%s, %s, %s)" % (str(self.expr_x), str(self.expr_y), str(self.expr_z))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
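# A hedged, numpy-only sketch of what ``get_meshes`` hands to the backend: the
# expression evaluated on a rectangular grid.  The helper is hypothetical and
# not used by the module.
def _surface_mesh_sketch():
    import numpy as np
    xs = np.linspace(-1.0, 1.0, 4)
    ys = np.linspace(-2.0, 2.0, 3)
    mesh_x, mesh_y = np.meshgrid(xs, ys)    # both meshes have shape (3, 4)
    mesh_z = mesh_x * mesh_y                # e.g. the surface z = x*y
    return mesh_x, mesh_y, mesh_z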
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround plotting piecewise (#8577):
# workaround for `lambdify` in `.experimental_lambdify` fails
# to return numerical values in some cases. Lower-level fix
# in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
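# ``flat`` treats three points as almost collinear when the angle at the middle
# point is close to 180 degrees, i.e. cos(theta) is close to -1.  A hedged
# numeric check (hypothetical helper, not called anywhere in the module):
def _flat_sketch():
    import numpy as np
    p = np.array([0.0, 0.0])
    q = np.array([1.0, 1.0])
    r = np.array([2.0, 2.0])    # on the straight line through p and q
    s = np.array([2.0, 3.0])    # bends away from that line
    return flat(p, q, r), flat(p, q, s)    # expected: (True, False)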
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
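# A hedged example of the flattening done by ``_matplotlib_list``: each
# bounding rectangle becomes four corners followed by a ``None`` separator.
# ``Interval`` below is a hypothetical stand-in for the real interval objects,
# which only need ``start`` and ``end`` attributes here.
def _matplotlib_list_sketch():
    from collections import namedtuple
    Interval = namedtuple('Interval', 'start end')
    rectangles = [(Interval(0, 1), Interval(0, 2))]   # x in [0, 1], y in [0, 2]
    xlist, ylist = _matplotlib_list(rectangles)
    # xlist == [0, 0, 1, 1, None] and ylist == [0, 2, 2, 0, None]
    return xlist, ylist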
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    accurately render the plot. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence
    the same plot can appear slightly different from run to run.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    accurately render the plot. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence
    the same plot can appear slightly different from run to run.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
    ``expr`` : Expression representing the surface, i.e. a function of the two
    range variables.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle the case of expression length 3 here, because it is
        # not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
JY-Zhou/FreePSI | scripts/evaluateGeneFamilyPsi.py | 1 | 4805 | import os
import math
import sys
import json
import numpy as np
import scipy as scp
import scipy.stats as stats
from outliers import smirnov_grubbs as grubbs
from sklearn import metrics
def procGeneFamily():
geneList = {}
for line in geneMapFile:
substr = line.split('\t')
isoName = substr[3]
geneName = substr[4]
if not geneName in geneList:
geneList[geneName] = []
geneList[geneName].append(isoName)
geneId = {}
for x in geneList:
geneId[x] = []
for y in geneList[x]:
if y in nameMap:
z = int(nameMap[y].split('Iso')[0].replace('Gene', ''))
if not z in geneId[x]:
geneId[x].append(z)
family = {}
for line in geneFamilyFile:
substr = line.split('\t')
if not substr[-1] in family:
family[substr[-1]] = []
family[substr[-1]].append((substr[1], substr[8], substr[9], substr[10]))
familyList = []
for x in family:
familyList.append((len(family[x]), family[x]))
for x in familyList:
if x[0] > 20 and len(x[1][0][2]) > 0:
ids = []
s = 0
for y in x[1]:
if y[0] in geneId:
ids.extend(geneId[y[0]])
s += len(geneList[y[0]])
#info = '>>> Gene_number=%d\tFamily_name=%s\tDescription=%s\n++> Mapped_gene_number=%d\tTotal_isoforms=%d\t' % (x[0], x[1][0][2], x[1][0][3], len(ids), s)
info = '%d\t%s\t%s\t%d\t%d\t' % (x[0], x[1][0][2], x[1][0][3], len(ids), s)
if len(ids) > 20 and s > 2*len(ids):
evaluateList.append((info, ids))
TPMFILTER = 10
def filter(i):
if not i in fam[1]:
return False
if len(truePsi[i]) < 40:
if trueTpm[i] >= TPMFILTER:
return True
return False
def statFilter():
print(fam[0], end = '')
#print("+++ Filter: true TPM >= " + str(TPMFILTER))
reserveGene = 0
for i in range(len(estPsi)):
if filter(i):
reserveGene += 1
#print('+++ # genes passed filter = \t' + str(reserveGene))
#print('+++ # genes failed (low expression level) = \t' + str(len(fam[0]) - reserveGene))
print(reserveGene, end = '\t')
def globalCorrelation():
estFlatPsi = []
trueFlatPsi = []
totreads = 0
totmulti = 0
for i in range(len(estPsi)):
if filter(i):
estFlatPsi.extend(estPsi[i])
trueFlatPsi.extend(truePsi[i])
totreads += readcov[i]
totmulti += multicov[i]
#print('--- Global correlation = ', end = '\t')
print(stats.pearsonr(trueFlatPsi, estFlatPsi)[0], end = '\t')
print(totreads, end = '\t')
print(totmulti)
def genelevel():
geneList = {}
for line in geneMapFile:
substr = line.split('\t')
isoName = substr[3]
geneName = substr[4]
if not geneName in geneList:
geneList[geneName] = []
geneList[geneName].append(isoName)
geneId = {}
for x in geneList:
geneId[x] = []
for y in geneList[x]:
if y in nameMap:
z = int(nameMap[y].split('Iso')[0].replace('Gene', ''))
if not z in geneId[x]:
geneId[x].append(z)
for i in range(len(estPsi)):
if trueTpm[i] > TPMFILTER:
print(i)
eps = 1e-6
nargv = 1
geneFamilyPath = sys.argv[nargv];nargv += 1
readsDisPath = sys.argv[nargv];nargv += 1
geneMapPath = sys.argv[nargv];nargv += 1
nameMapPath = sys.argv[nargv];nargv += 1
truePsiPath = sys.argv[nargv];nargv += 1
trueTpmPath = sys.argv[nargv];nargv += 1
estPsiPath = sys.argv[nargv];nargv += 1
outputPath = sys.argv[nargv];nargv += 1
geneFamilyFile = open(geneFamilyPath, 'r')
readsDisFile = open(readsDisPath, 'r')
geneMapFile = open(geneMapPath, 'r')
nameMapFile = open(nameMapPath, 'r')
truePsiFile = open(truePsiPath, 'r')
trueTpmFile = open(trueTpmPath, 'r')
estPsiFile = open(estPsiPath, 'r')
outputFile = open(outputPath, 'w')
nameMap = json.load(nameMapFile)
truePsi = json.load(truePsiFile)
trueTpm = json.load(trueTpmFile)
estPsi = json.load(estPsiFile)
readcov = []
multicov = []
for line in readsDisFile:
substr = line.split('\t')
readcov.append(float(substr[1]))
multicov.append(float(substr[2]))
sys.stdout = outputFile
evaluateList = []
procGeneFamily()
evaluateList = sorted(evaluateList, key = lambda x: x[0])
print(outputPath.split('/')[-1])
print('Total reads in experiment = %d, total multireads = %d' % (sum(readcov), sum(multicov)))
print('Gene_Number\tFamily_name\tDescription\tMapped_Gene_Number\tTotal_isoform\tPassed_Gene_Number\tCorrelation\t# reads\t# multi-reads')
for fam in evaluateList:
statFilter()
globalCorrelation()
print()
#genelevel()
| gpl-3.0 |
sinpantuflas/aubio | python/demos/demo_specdesc.py | 9 | 2573 | #! /usr/bin/env python
import sys
from aubio import fvec, source, pvoc, specdesc
from numpy import hstack
win_s = 512 # fft size
hop_s = win_s / 4 # hop size
if len(sys.argv) < 2:
print "Usage: %s <filename> [samplerate]" % sys.argv[0]
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
pv = pvoc(win_s, hop_s)
methods = ['default', 'energy', 'hfc', 'complex', 'phase', 'specdiff', 'kl',
'mkl', 'specflux', 'centroid', 'slope', 'rolloff', 'spread', 'skewness',
'kurtosis', 'decrease',]
all_descs = {}
o = {}
for method in methods:
cands = []
all_descs[method] = fvec(0)
o[method] = specdesc(method, win_s)
total_frames = 0
downsample = 2
while True:
samples, read = s()
fftgrain = pv(samples)
#print "%f" % ( total_frames / float(samplerate) ),
for method in methods:
specdesc_val = o[method](fftgrain)[0]
all_descs[method] = hstack ( [all_descs[method], specdesc_val] )
#print "%f" % specdesc_val,
#print
total_frames += read
if read < hop_s: break
if 1:
print "done computing, now plotting"
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot(filename, samplerate, block_size = hop_s, ax = wave )
wave.yaxis.set_visible(False)
wave.xaxis.set_visible(False)
all_desc_times = [ x * hop_s for x in range(len(all_descs["default"])) ]
n_methods = len(methods)
for i, method in enumerate(methods):
#ax = fig.add_subplot (n_methods, 1, i)
#plt2 = plt.axes([0.1, 0.1, 0.8, 0.65], sharex = plt1)
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_methods), 0.8, 0.65 / n_methods], sharex = wave )
ax.plot(all_desc_times, all_descs[method], '-', label = method)
#ax.set_ylabel(method, rotation = 0)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.axis(xmax = all_desc_times[-1], xmin = all_desc_times[0])
ax.annotate(method, xy=(-10, 0), xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
)
set_xlabels_sample2time(ax, all_desc_times[-1], samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
plt.show()
| gpl-3.0 |
JasonKessler/scattertext | scattertext/termscoring/MannWhitneyU.py | 1 | 3706 | import pandas as pd
import numpy as np
from scipy.stats import norm, mannwhitneyu, ranksums
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class MannWhitneyU(CorpusBasedTermScorer):
'''
term_scorer = (MannWhitneyU(corpus).set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'rotten_fresh_mwu.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['mwu_z']
def get_score_df(self, correction_method=None):
'''
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
cat_X, ncat_X = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2*norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
try:
                if cat_list.mean() > ncat_list.mean():
                    mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        # fall back to the normal approximation at scipy's numerical limits
                        pvalue = normal_apx(mw.statistic, cat_list, ncat_list)
                    scores.append({'mwu': mw.statistic, 'mwu_p': pvalue,
                                   'mwu_z': norm.isf(float(pvalue)), 'valid': True})
                else:
                    mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        pvalue = normal_apx(mw.statistic, ncat_list, cat_list)
                    scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - pvalue,
                                   'mwu_z': 1. - norm.isf(float(pvalue)), 'valid': True})
except:
scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid':False})
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
return score_df
def get_name(self):
return "Mann Whitney Z"
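# The normal approximation used in ``get_score_df`` converts a Mann-Whitney U
# statistic into a z-score via its mean m_u = n1*n2/2 and standard deviation
# sigma_u = sqrt(n1*n2*(n1+n2+1)/12).  The function below is a hedged,
# self-contained sketch of that idea (a hypothetical helper, not used by the
# class); it assumes scipy and numpy are importable.
def _mwu_normal_approximation_sketch():
    import numpy as np
    from scipy.stats import mannwhitneyu, norm
    rng = np.random.RandomState(0)
    x = rng.normal(1.0, 1.0, 200)       # shifted sample
    y = rng.normal(0.0, 1.0, 200)       # reference sample
    u = mannwhitneyu(x, y, alternative='greater').statistic
    m_u = len(x) * len(y) / 2.0
    sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12.0)
    z = (u - m_u) / sigma_u
    return z, norm.sf(z)                # a large z and a tiny one-sided p-value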
| apache-2.0 |
Eric89GXL/scikit-learn | sklearn/feature_extraction/text.py | 1 | 47496 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from sklearn.externals import six
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
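# Illustrative sketch (assumed values, not part of the original module):
# with ngram_range=(1, 2), no stop words, and tokens ['the', 'quick', 'fox'],
# the method above yields unigrams followed by bigrams:
#     ['the', 'quick', 'fox', 'the quick', 'quick fox']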
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
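# Illustrative sketch (assumed values, not part of the original module):
# with ngram_range=(3, 3), _char_wb_ngrams pads each word with spaces and
# slides a window that never crosses word boundaries, so "hi you" becomes:
#     [' hi', 'hi ', ' yo', 'you', 'ou ']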
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
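# Illustrative sketch (assumed document, not part of the original module):
# the callable returned by build_analyzer chains decode -> preprocess ->
# tokenize -> n-gram generation. For a word analyzer on a CountVectorizer:
#
#     vec = CountVectorizer(ngram_range=(1, 2), stop_words='english')
#     analyze = vec.build_analyzer()
#     analyze("The quick brown fox")
#     # -> ['quick', 'brown', 'fox', 'quick brown', 'brown fox']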
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', charset=None, encoding='utf-8',
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
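# Illustrative sketch (assumed documents, not part of the original module):
# stateless, hashing-based vectorization of a small corpus.
#
#     docs = ["the cat sat", "the dog barked"]
#     hv = HashingVectorizer(n_features=2 ** 10, norm='l2')
#     X = hv.transform(docs)          # scipy.sparse matrix, shape (2, 1024)
#     # no vocabulary_ attribute exists, so feature names cannot be recovered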
def _document_frequency(X):
"""Count the number of non-zero values for each feature in csc_matrix X."""
return np.diff(X.indptr)
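# Illustrative sketch (assumed matrix, not part of the original module):
# for a CSC matrix, indptr[j + 1] - indptr[j] is the number of stored
# (non-zero) entries in column j, so np.diff(X.indptr) gives the per-term
# document frequency, e.g.
#     X = sp.csc_matrix(np.array([[1, 0], [2, 3]]))
#     _document_frequency(X)          # -> array([2, 1])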
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a term frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a term frequency
strictly lower than the given threshold. This value is also called
cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
`vocabulary_` : dict
A mapping of terms to feature indices.
`stop_words_` : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d."
raise ValueError(msg % (len(vocabulary), i))
self.fixed_vocabulary = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary = False
self.binary = binary
self.dtype = dtype
def _sort_features(self, cscmatrix, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return cscmatrix[:, map_index]
def _limit_features(self, cscmatrix, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more documents than high or in
fewer documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return cscmatrix, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(cscmatrix)
tfs = np.asarray(cscmatrix.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
return cscmatrix[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict(None)
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
# some Python/Scipy versions won't accept an array.array:
if j_indices:
j_indices = np.frombuffer(j_indices, dtype=np.intc)
else:
j_indices = np.array([], dtype=np.int32)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors.
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
vectors : array, [n_samples, n_features]
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary)
X = X.tocsc()
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else int(round(max_df * n_doc)))
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else int(round(min_df * n_doc)))
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
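# Illustrative sketch (assumed documents, not part of the original module):
# fitting a vocabulary and inspecting the resulting count matrix.
#
#     docs = ["the cat sat", "the cat sat on the mat"]
#     cv = CountVectorizer()
#     X = cv.fit_transform(docs)     # sparse counts, shape (2, n_terms)
#     cv.get_feature_names()         # -> ['cat', 'mat', 'on', 'sat', 'the']
#     cv.vocabulary_['cat']          # column index assigned to the term 'cat'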
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
In the SMART notation used in IR, this class implements several tf-idf
variants:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MSR2008] `C.D. Manning, H. Schuetze and P. Raghavan (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 121-125.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.isspmatrix_csc(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# avoid division by zeros for features that occur in all documents
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
if not hasattr(self, "_idf_diag"):
raise ValueError("idf vector not fitted")
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
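# Illustrative sketch (not part of the original module): with the default
# smooth_idf=True, fit() above computes, for a term seen in df documents out
# of n samples,
#     idf = ln((n + 1) / (df + 1)) + 1
# so a term present in every document still gets weight 1 rather than 0.
# For example, n = 4 and df = 1 give idf = ln(5 / 2) + 1, roughly 1.92, which
# transform() multiplies into each (optionally sublinear) term frequency
# before the optional l2 normalization.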
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a term frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a term frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=False,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
def fit(self, raw_documents, y=None):
"""Learn a conversion law from documents to array data"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the representation and return the vectors.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors : array, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform raw text documents to tf-idf vectors
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy)
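# Illustrative sketch (assumed documents, not part of the original module):
# the vectorizer above behaves like CountVectorizer followed by
# TfidfTransformer.
#
#     docs = ["the cat sat", "the dog sat"]
#     tv = TfidfVectorizer(norm='l2')
#     X = tv.fit_transform(docs)           # l2-normalized tf-idf rows
#     X_new = tv.transform(["a new cat"])  # reuses the fitted idf weights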
| bsd-3-clause |
gfyoung/pandas | pandas/tests/arrays/test_datetimelike.py | 1 | 44285 | import re
from typing import Type, Union
import numpy as np
import pytest
from pandas._libs import NaT, OutOfBoundsDatetime, Timestamp
from pandas.compat import np_version_under1p18
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, TimedeltaIndex
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, PandasArray, PeriodArray, TimedeltaArray
# TODO: more freq variants
@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"])
def freqstr(request):
return request.param
@pytest.fixture
def period_index(freqstr):
"""
A fixture to provide PeriodIndex objects with different frequencies.
Most PeriodArray behavior is already tested in PeriodIndex tests,
so here we just test that the PeriodArray behavior matches
the PeriodIndex behavior.
"""
# TODO: non-monotone indexes; NaTs, different start dates
pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
return pi
@pytest.fixture
def datetime_index(freqstr):
"""
A fixture to provide DatetimeIndex objects with different frequencies.
Most DatetimeArray behavior is already tested in DatetimeIndex tests,
so here we just test that the DatetimeArray behavior matches
the DatetimeIndex behavior.
"""
# TODO: non-monotone indexes; NaTs, different start dates, timezones
dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
return dti
@pytest.fixture
def timedelta_index():
"""
A fixture to provide TimedeltaIndex objects with different frequencies.
Most TimedeltaArray behavior is already tested in TimedeltaIndex tests,
so here we just test that the TimedeltaArray behavior matches
the TimedeltaIndex behavior.
"""
# TODO: flesh this out
return TimedeltaIndex(["1 Day", "3 Hours", "NaT"])
class SharedTests:
index_cls: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]]
@pytest.fixture
def arr1d(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
return arr
def test_compare_len1_raises(self):
# make sure we raise when comparing with different lengths, specific
# to the case where one has length-1, which numpy would broadcast
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls(arr)
with pytest.raises(ValueError, match="Lengths must match"):
arr == arr[:1]
# test the index classes while we're at it, GH#23078
with pytest.raises(ValueError, match="Lengths must match"):
idx <= idx[[0]]
@pytest.mark.parametrize(
"result",
[
pd.date_range("2020", periods=3),
pd.date_range("2020", periods=3, tz="UTC"),
pd.timedelta_range("0 days", periods=3),
pd.period_range("2020Q1", periods=3, freq="Q"),
],
)
def test_compare_with_Categorical(self, result):
expected = pd.Categorical(result)
assert all(result == expected)
assert not any(result != expected)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("as_index", [True, False])
def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
other = pd.Categorical(arr1d, ordered=ordered)
if as_index:
other = pd.CategoricalIndex(other)
left, right = arr1d, other
if reverse:
left, right = right, left
ones = np.ones(arr1d.shape, dtype=bool)
zeros = ~ones
result = left == right
tm.assert_numpy_array_equal(result, ones)
result = left != right
tm.assert_numpy_array_equal(result, zeros)
if not reverse and not as_index:
# Otherwise Categorical raises TypeError bc it is not ordered
# TODO: we should probably get the same behavior regardless?
result = left < right
tm.assert_numpy_array_equal(result, zeros)
result = left <= right
tm.assert_numpy_array_equal(result, ones)
result = left > right
tm.assert_numpy_array_equal(result, zeros)
result = left >= right
tm.assert_numpy_array_equal(result, ones)
def test_take(self):
data = np.arange(100, dtype="i8") * 24 * 3600 * 10 ** 9
np.random.shuffle(data)
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls._simple_new(arr)
takers = [1, 4, 94]
result = arr.take(takers)
expected = idx.take(takers)
tm.assert_index_equal(self.index_cls(result), expected)
takers = np.array([1, 4, 94])
result = arr.take(takers)
expected = idx.take(takers)
tm.assert_index_equal(self.index_cls(result), expected)
@pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp.now().time])
def test_take_fill_raises(self, fill_value):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
arr.take([0, 1], allow_fill=True, fill_value=fill_value)
def test_take_fill(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
result = arr.take([-1, 1], allow_fill=True, fill_value=None)
assert result[0] is pd.NaT
result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)
assert result[0] is pd.NaT
result = arr.take([-1, 1], allow_fill=True, fill_value=pd.NaT)
assert result[0] is pd.NaT
def test_take_fill_str(self, arr1d):
# Cast str fill_value matching other fill_value-taking methods
result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1]))
expected = arr1d[[-1, 1]]
tm.assert_equal(result, expected)
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
arr1d.take([-1, 1], allow_fill=True, fill_value="foo")
def test_concat_same_type(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls(arr)
idx = idx.insert(0, pd.NaT)
arr = self.array_cls(idx)
result = arr._concat_same_type([arr[:-1], arr[1:], arr])
arr2 = arr.astype(object)
expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]), None)
tm.assert_index_equal(self.index_cls(result), expected)
def test_unbox_scalar(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
result = arr._unbox_scalar(arr[0])
expected = arr._data.dtype.type
assert isinstance(result, expected)
result = arr._unbox_scalar(pd.NaT)
assert isinstance(result, expected)
msg = f"'value' should be a {self.dtype.__name__}."
with pytest.raises(ValueError, match=msg):
arr._unbox_scalar("foo")
def test_check_compatible_with(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr._check_compatible_with(arr[0])
arr._check_compatible_with(arr[:1])
arr._check_compatible_with(pd.NaT)
def test_scalar_from_string(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
result = arr._scalar_from_string(str(arr[0]))
assert result == arr[0]
def test_reduce_invalid(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
msg = f"'{type(arr).__name__}' does not implement reduction 'not a method'"
with pytest.raises(TypeError, match=msg):
arr._reduce("not a method")
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_method_doesnt_change_orig(self, method):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr[4] = pd.NaT
fill_value = arr[3] if method == "pad" else arr[5]
result = arr.fillna(method=method)
assert result[4] == fill_value
# check that the original was not changed
assert arr[4] is pd.NaT
def test_searchsorted(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
# scalar
result = arr.searchsorted(arr[1])
assert result == 1
result = arr.searchsorted(arr[2], side="right")
assert result == 3
# own-type
result = arr.searchsorted(arr[1:3])
expected = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = arr.searchsorted(arr[1:3], side="right")
expected = np.array([2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
# GH#29884 match numpy convention on whether NaT goes
# at the end or the beginning
result = arr.searchsorted(pd.NaT)
if np_version_under1p18:
# Following numpy convention, NaT goes at the beginning
# (unlike NaN which goes at the end)
assert result == 0
else:
assert result == 10
@pytest.mark.parametrize("box", [None, "index", "series"])
def test_searchsorted_castable_strings(self, arr1d, box, request):
if isinstance(arr1d, DatetimeArray):
tz = arr1d.tz
ts1, ts2 = arr1d[1:3]
if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2):
# If we have e.g. tzutc(), when we cast to string and parse
# back we get pytz.UTC, and then consider them different timezones
# so incorrectly raise.
mark = pytest.mark.xfail(reason="timezone comparisons inconsistent")
request.node.add_marker(mark)
arr = arr1d
if box is None:
pass
elif box == "index":
# Test the equivalent Index.searchsorted method while we're here
arr = self.index_cls(arr)
else:
# Test the equivalent Series.searchsorted method while we're here
arr = pd.Series(arr)
# scalar
result = arr.searchsorted(str(arr[1]))
assert result == 1
result = arr.searchsorted(str(arr[2]), side="right")
assert result == 3
result = arr.searchsorted([str(x) for x in arr[1:3]])
expected = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
with pytest.raises(
TypeError,
match=re.escape(
f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
"or array of those. Got 'str' instead."
),
):
arr.searchsorted("foo")
with pytest.raises(
TypeError,
match=re.escape(
f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
"or array of those. Got 'StringArray' instead."
),
):
arr.searchsorted([str(arr[1]), "baz"])
def test_getitem_near_implementation_bounds(self):
# We only check tz-naive for DTA bc the bounds are slightly different
# for other tzs
i8vals = np.asarray([NaT.value + n for n in range(1, 5)], dtype="i8")
arr = self.array_cls(i8vals, freq="ns")
arr[0] # should not raise OutOfBoundsDatetime
index = pd.Index(arr)
index[0] # should not raise OutOfBoundsDatetime
ser = pd.Series(arr)
ser[0] # should not raise OutOfBoundsDatetime
def test_getitem_2d(self, arr1d):
# 2d slicing on a 1D array
expected = type(arr1d)(arr1d._data[:, np.newaxis], dtype=arr1d.dtype)
result = arr1d[:, np.newaxis]
tm.assert_equal(result, expected)
# Lookup on a 2D array
arr2d = expected
expected = type(arr2d)(arr2d._data[:3, 0], dtype=arr2d.dtype)
result = arr2d[:3, 0]
tm.assert_equal(result, expected)
# Scalar lookup
result = arr2d[-1, 0]
expected = arr1d[-1]
assert result == expected
def test_iter_2d(self, arr1d):
data2d = arr1d._data[:3, np.newaxis]
arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
result = list(arr2d)
assert len(result) == 3
for x in result:
assert isinstance(x, type(arr1d))
assert x.ndim == 1
assert x.dtype == arr1d.dtype
def test_repr_2d(self, arr1d):
data2d = arr1d._data[:3, np.newaxis]
arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
result = repr(arr2d)
if isinstance(arr2d, TimedeltaArray):
expected = (
f"<{type(arr2d).__name__}>\n"
"[\n"
f"['{arr1d[0]._repr_base()}'],\n"
f"['{arr1d[1]._repr_base()}'],\n"
f"['{arr1d[2]._repr_base()}']\n"
"]\n"
f"Shape: (3, 1), dtype: {arr1d.dtype}"
)
else:
expected = (
f"<{type(arr2d).__name__}>\n"
"[\n"
f"['{arr1d[0]}'],\n"
f"['{arr1d[1]}'],\n"
f"['{arr1d[2]}']\n"
"]\n"
f"Shape: (3, 1), dtype: {arr1d.dtype}"
)
assert result == expected
def test_setitem(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr[0] = arr[1]
expected = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
expected[0] = expected[1]
tm.assert_numpy_array_equal(arr.asi8, expected)
arr[:2] = arr[-2:]
expected[:2] = expected[-2:]
tm.assert_numpy_array_equal(arr.asi8, expected)
@pytest.mark.parametrize(
"box",
[
pd.Index,
pd.Series,
np.array,
list,
PandasArray,
],
)
def test_setitem_object_dtype(self, box, arr1d):
expected = arr1d.copy()[::-1]
if expected.dtype.kind in ["m", "M"]:
expected = expected._with_freq(None)
vals = expected
if box is list:
vals = list(vals)
elif box is np.array:
# if we do np.array(x).astype(object) then dt64 and td64 cast to ints
vals = np.array(vals.astype(object))
elif box is PandasArray:
vals = box(np.asarray(vals, dtype=object))
else:
vals = box(vals).astype(object)
arr1d[:] = vals
tm.assert_equal(arr1d, expected)
def test_setitem_strs(self, arr1d, request):
# Check that we parse strs in both scalar and listlike
if isinstance(arr1d, DatetimeArray):
tz = arr1d.tz
ts1, ts2 = arr1d[-2:]
if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2):
# If we have e.g. tzutc(), when we cast to string and parse
# back we get pytz.UTC, and then consider them different timezones
# so incorrectly raise.
mark = pytest.mark.xfail(reason="timezone comparisons inconsistent")
request.node.add_marker(mark)
# Setting list-like of strs
expected = arr1d.copy()
expected[[0, 1]] = arr1d[-2:]
result = arr1d.copy()
result[:2] = [str(x) for x in arr1d[-2:]]
tm.assert_equal(result, expected)
# Same thing but now for just a scalar str
expected = arr1d.copy()
expected[0] = arr1d[-1]
result = arr1d.copy()
result[0] = str(arr1d[-1])
tm.assert_equal(result, expected)
@pytest.mark.parametrize("as_index", [True, False])
def test_setitem_categorical(self, arr1d, as_index):
expected = arr1d.copy()[::-1]
if not isinstance(expected, PeriodArray):
expected = expected._with_freq(None)
cat = pd.Categorical(arr1d)
if as_index:
cat = pd.CategoricalIndex(cat)
arr1d[:] = cat[::-1]
tm.assert_equal(arr1d, expected)
def test_setitem_raises(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
val = arr[0]
with pytest.raises(IndexError, match="index 12 is out of bounds"):
arr[12] = val
with pytest.raises(TypeError, match="value should be a.* 'object'"):
arr[0] = object()
msg = "cannot set using a list-like indexer with a different length"
with pytest.raises(ValueError, match=msg):
# GH#36339
arr[[]] = [arr[1]]
msg = "cannot set using a slice indexer with a different length than"
with pytest.raises(ValueError, match=msg):
# GH#36339
arr[1:1] = arr[:3]
@pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
def test_setitem_numeric_raises(self, arr1d, box):
# We don't cast e.g. int64 to our own dtype for setitem
msg = (
f"value should be a '{arr1d._scalar_type.__name__}', "
"'NaT', or array of those. Got"
)
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0, 1])
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0.0, 1.0])
def test_inplace_arithmetic(self):
# GH#24115 check that iadd and isub are actually in-place
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
expected = arr + pd.Timedelta(days=1)
arr += pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
expected = arr - pd.Timedelta(days=1)
arr -= pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
def test_shift_fill_int_deprecated(self):
# GH#31971
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = arr.shift(1, fill_value=1)
expected = arr.copy()
if self.array_cls is PeriodArray:
fill_val = PeriodArray._scalar_type._from_ordinal(1, freq=arr.freq)
else:
fill_val = arr._scalar_type(1)
expected[0] = fill_val
expected[1:] = arr[:-1]
tm.assert_equal(result, expected)
def test_median(self, arr1d):
arr = arr1d
if len(arr) % 2 == 0:
# make it easier to define `expected`
arr = arr[:-1]
expected = arr[len(arr) // 2]
result = arr.median()
assert type(result) is type(expected)
assert result == expected
arr[len(arr) // 2] = NaT
if not isinstance(expected, Period):
expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean()
assert arr.median(skipna=False) is NaT
result = arr.median()
assert type(result) is type(expected)
assert result == expected
assert arr[:0].median() is NaT
assert arr[:0].median(skipna=False) is NaT
# 2d Case
arr2 = arr.reshape(-1, 1)
result = arr2.median(axis=None)
assert type(result) is type(expected)
assert result == expected
assert arr2.median(axis=None, skipna=False) is NaT
result = arr2.median(axis=0)
expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype)
tm.assert_equal(result, expected2)
result = arr2.median(axis=0, skipna=False)
expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype)
tm.assert_equal(result, expected2)
result = arr2.median(axis=1)
tm.assert_equal(result, arr)
result = arr2.median(axis=1, skipna=False)
tm.assert_equal(result, arr)
class TestDatetimeArray(SharedTests):
index_cls = pd.DatetimeIndex
array_cls = DatetimeArray
dtype = Timestamp
@pytest.fixture
def arr1d(self, tz_naive_fixture, freqstr):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz)
dta = dti._data
return dta
def test_round(self, arr1d):
# GH#24064
dti = self.index_cls(arr1d)
result = dti.round(freq="2T")
expected = dti - pd.Timedelta(minutes=1)
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
dta = dti._data
result = dta.round(freq="2T")
expected = expected._data._with_freq(None)
tm.assert_datetime_array_equal(result, expected)
def test_array_interface(self, datetime_index):
arr = DatetimeArray(datetime_index)
# default asarray gives the same underlying data (for tz naive)
result = np.asarray(arr)
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
# specifying M8[ns] gives the same result as default
result = np.asarray(arr, dtype="datetime64[ns]")
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]", copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]")
assert result is not expected
tm.assert_numpy_array_equal(result, expected)
# to object dtype
result = np.asarray(arr, dtype=object)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to other dtype always copies
result = np.asarray(arr, dtype="int64")
assert result is not arr.asi8
assert not np.may_share_memory(arr, result)
expected = arr.asi8.copy()
tm.assert_numpy_array_equal(result, expected)
# other dtypes handled by numpy
for dtype in ["float64", str]:
result = np.asarray(arr, dtype=dtype)
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
def test_array_object_dtype(self, arr1d):
# GH#23524
arr = arr1d
dti = self.index_cls(arr1d)
expected = np.array(list(dti))
result = np.array(arr, dtype=object)
tm.assert_numpy_array_equal(result, expected)
# also test the DatetimeIndex method while we're at it
result = np.array(dti, dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_array_tz(self, arr1d):
# GH#23524
arr = arr1d
dti = self.index_cls(arr1d)
expected = dti.asi8.view("M8[ns]")
result = np.array(arr, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
# check that we are not making copies when setting copy=False
result = np.array(arr, dtype="M8[ns]", copy=False)
assert result.base is expected.base
assert result.base is not None
result = np.array(arr, dtype="datetime64[ns]", copy=False)
assert result.base is expected.base
assert result.base is not None
def test_array_i8_dtype(self, arr1d):
arr = arr1d
dti = self.index_cls(arr1d)
expected = dti.asi8
result = np.array(arr, dtype="i8")
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
# check that we are still making copies when setting copy=False
result = np.array(arr, dtype="i8", copy=False)
assert result.base is not expected.base
assert result.base is None
def test_from_array_keeps_base(self):
# Ensure that DatetimeArray._data.base isn't lost.
arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
dta = DatetimeArray(arr)
assert dta._data is arr
dta = DatetimeArray(arr[:0])
assert dta._data.base is arr
def test_from_dti(self, arr1d):
arr = arr1d
dti = self.index_cls(arr1d)
assert list(dti) == list(arr)
# Check that Index.__new__ knows what to do with DatetimeArray
dti2 = pd.Index(arr)
assert isinstance(dti2, pd.DatetimeIndex)
assert list(dti2) == list(arr)
def test_astype_object(self, arr1d):
arr = arr1d
dti = self.index_cls(arr1d)
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(dti)
def test_to_perioddelta(self, datetime_index, freqstr):
# GH#23113
dti = datetime_index
arr = DatetimeArray(dti)
with tm.assert_produces_warning(FutureWarning):
# Deprecation GH#34853
expected = dti.to_perioddelta(freq=freqstr)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# stacklevel is chosen to be "correct" for DatetimeIndex, not
# DatetimeArray
result = arr.to_perioddelta(freq=freqstr)
assert isinstance(result, TimedeltaArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
def test_to_period(self, datetime_index, freqstr):
dti = datetime_index
arr = DatetimeArray(dti)
expected = dti.to_period(freq=freqstr)
result = arr.to_period(freq=freqstr)
assert isinstance(result, PeriodArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
def test_to_period_2d(self, arr1d):
arr2d = arr1d.reshape(1, -1)
warn = None if arr1d.tz is None else UserWarning
with tm.assert_produces_warning(warn):
result = arr2d.to_period("D")
expected = arr1d.to_period("D").reshape(1, -1)
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize("propname", pd.DatetimeIndex._bool_ops)
def test_bool_properties(self, arr1d, propname):
# in this case _bool_ops is just `is_leap_year`
dti = self.index_cls(arr1d)
arr = arr1d
assert dti.freq == arr.freq
result = getattr(arr, propname)
expected = np.array(getattr(dti, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("propname", pd.DatetimeIndex._field_ops)
def test_int_properties(self, arr1d, propname):
if propname in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
return
dti = self.index_cls(arr1d)
arr = arr1d
result = getattr(arr, propname)
expected = np.array(getattr(dti, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_take_fill_valid(self, arr1d):
arr = arr1d
dti = self.index_cls(arr1d)
now = Timestamp.now().tz_localize(dti.tz)
result = arr.take([-1, 1], allow_fill=True, fill_value=now)
assert result[0] == now
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# fill_value Timedelta invalid
arr.take([-1, 1], allow_fill=True, fill_value=now - now)
with pytest.raises(TypeError, match=msg):
# fill_value Period invalid
arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1"))
tz = None if dti.tz is not None else "US/Eastern"
now = Timestamp.now().tz_localize(tz)
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
# Timestamp with mismatched tz-awareness
arr.take([-1, 1], allow_fill=True, fill_value=now)
value = pd.NaT.value
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
arr.take([-1, 1], allow_fill=True, fill_value=value)
value = np.timedelta64("NaT", "ns")
with pytest.raises(TypeError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
if arr.tz is not None:
# GH#37356
# Assuming here that arr1d fixture does not include Australia/Melbourne
value = Timestamp.now().tz_localize("Australia/Melbourne")
msg = "Timezones don't match. .* != 'Australia/Melbourne'"
with pytest.raises(ValueError, match=msg):
# require tz match, not just tzawareness match
arr.take([-1, 1], allow_fill=True, fill_value=value)
def test_concat_same_type_invalid(self, arr1d):
# different timezones
arr = arr1d
if arr.tz is None:
other = arr.tz_localize("UTC")
else:
other = arr.tz_localize(None)
with pytest.raises(ValueError, match="to_concat must have the same"):
arr._concat_same_type([arr, other])
def test_concat_same_type_different_freq(self):
# we *can* concatenate DTI with different freqs.
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
b = DatetimeArray(pd.date_range("2000", periods=2, freq="H", tz="US/Central"))
result = DatetimeArray._concat_same_type([a, b])
expected = DatetimeArray(
pd.to_datetime(
[
"2000-01-01 00:00:00",
"2000-01-02 00:00:00",
"2000-01-01 00:00:00",
"2000-01-01 01:00:00",
]
).tz_localize("US/Central")
)
tm.assert_datetime_array_equal(result, expected)
def test_strftime(self, arr1d):
arr = arr1d
result = arr.strftime("%Y %b")
expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_strftime_nat(self):
# GH 29578
arr = DatetimeArray(DatetimeIndex(["2019-01-01", pd.NaT]))
result = arr.strftime("%Y-%m-%d")
expected = np.array(["2019-01-01", np.nan], dtype=object)
tm.assert_numpy_array_equal(result, expected)
class TestTimedeltaArray(SharedTests):
index_cls = TimedeltaIndex
array_cls = TimedeltaArray
dtype = pd.Timedelta
def test_from_tdi(self):
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
arr = TimedeltaArray(tdi)
assert list(arr) == list(tdi)
# Check that Index.__new__ knows what to do with TimedeltaArray
tdi2 = pd.Index(arr)
assert isinstance(tdi2, TimedeltaIndex)
assert list(tdi2) == list(arr)
def test_astype_object(self):
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
arr = TimedeltaArray(tdi)
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(tdi)
def test_to_pytimedelta(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
expected = tdi.to_pytimedelta()
result = arr.to_pytimedelta()
tm.assert_numpy_array_equal(result, expected)
def test_total_seconds(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
expected = tdi.total_seconds()
result = arr.total_seconds()
tm.assert_numpy_array_equal(result, expected.values)
@pytest.mark.parametrize("propname", TimedeltaIndex._field_ops)
def test_int_properties(self, timedelta_index, propname):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
result = getattr(arr, propname)
expected = np.array(getattr(tdi, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self, timedelta_index):
arr = TimedeltaArray(timedelta_index)
# default asarray gives the same underlying data
result = np.asarray(arr)
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
# specifying m8[ns] gives the same result as default
result = np.asarray(arr, dtype="timedelta64[ns]")
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]", copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]")
assert result is not expected
tm.assert_numpy_array_equal(result, expected)
# to object dtype
result = np.asarray(arr, dtype=object)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to other dtype always copies
result = np.asarray(arr, dtype="int64")
assert result is not arr.asi8
assert not np.may_share_memory(arr, result)
expected = arr.asi8.copy()
tm.assert_numpy_array_equal(result, expected)
# other dtypes handled by numpy
for dtype in ["float64", str]:
result = np.asarray(arr, dtype=dtype)
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
def test_take_fill_valid(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
td1 = pd.Timedelta(days=1)
result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
assert result[0] == td1
now = Timestamp.now()
value = now
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# fill_value Timestamp invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = now.to_period("D")
with pytest.raises(TypeError, match=msg):
# fill_value Period invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = np.datetime64("NaT", "ns")
with pytest.raises(TypeError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
class TestPeriodArray(SharedTests):
index_cls = PeriodIndex
array_cls = PeriodArray
dtype = Period
@pytest.fixture
def arr1d(self, period_index):
return period_index._data
def test_from_pi(self, arr1d):
pi = self.index_cls(arr1d)
arr = arr1d
assert list(arr) == list(pi)
# Check that Index.__new__ knows what to do with PeriodArray
pi2 = pd.Index(arr)
assert isinstance(pi2, PeriodIndex)
assert list(pi2) == list(arr)
def test_astype_object(self, arr1d):
pi = self.index_cls(arr1d)
arr = arr1d
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(pi)
def test_take_fill_valid(self, arr1d):
arr = arr1d
value = pd.NaT.value
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
arr.take([-1, 1], allow_fill=True, fill_value=value)
value = np.timedelta64("NaT", "ns")
with pytest.raises(TypeError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
@pytest.mark.parametrize("how", ["S", "E"])
def test_to_timestamp(self, how, arr1d):
pi = self.index_cls(arr1d)
arr = arr1d
expected = DatetimeArray(pi.to_timestamp(how=how))
result = arr.to_timestamp(how=how)
assert isinstance(result, DatetimeArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
def test_to_timestamp_out_of_bounds(self):
# GH#19643 previously overflowed silently
pi = pd.period_range("1500", freq="Y", periods=3)
msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pi.to_timestamp()
with pytest.raises(OutOfBoundsDatetime, match=msg):
pi._data.to_timestamp()
@pytest.mark.parametrize("propname", PeriodArray._bool_ops)
def test_bool_properties(self, arr1d, propname):
# in this case _bool_ops is just `is_leap_year`
pi = self.index_cls(arr1d)
arr = arr1d
result = getattr(arr, propname)
expected = np.array(getattr(pi, propname))
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("propname", PeriodArray._field_ops)
def test_int_properties(self, arr1d, propname):
pi = self.index_cls(arr1d)
arr = arr1d
result = getattr(arr, propname)
expected = np.array(getattr(pi, propname))
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self, arr1d):
arr = arr1d
# default asarray gives objects
result = np.asarray(arr)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to object dtype (same as default)
result = np.asarray(arr, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(arr, dtype="int64")
tm.assert_numpy_array_equal(result, arr.asi8)
# to other dtypes
msg = r"float\(\) argument must be a string or a number, not 'Period'"
with pytest.raises(TypeError, match=msg):
np.asarray(arr, dtype="float64")
result = np.asarray(arr, dtype="S20")
expected = np.asarray(arr).astype("S20")
tm.assert_numpy_array_equal(result, expected)
def test_strftime(self, arr1d):
arr = arr1d
result = arr.strftime("%Y")
expected = np.array([per.strftime("%Y") for per in arr], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_strftime_nat(self):
# GH 29578
arr = PeriodArray(PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"))
result = arr.strftime("%Y-%m-%d")
expected = np.array(["2019-01-01", np.nan], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"array,casting_nats",
[
(
TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
(pd.NaT, np.timedelta64("NaT", "ns")),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
(pd.NaT, np.datetime64("NaT", "ns")),
),
(pd.period_range("2000-01-01", periods=3, freq="D")._data, (pd.NaT,)),
],
ids=lambda x: type(x).__name__,
)
def test_casting_nat_setitem_array(array, casting_nats):
expected = type(array)._from_sequence([pd.NaT, array[1], array[2]])
for nat in casting_nats:
arr = array.copy()
arr[0] = nat
tm.assert_equal(arr, expected)
@pytest.mark.parametrize(
"array,non_casting_nats",
[
(
TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
(np.datetime64("NaT", "ns"), pd.NaT.value),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
(np.timedelta64("NaT", "ns"), pd.NaT.value),
),
(
pd.period_range("2000-01-01", periods=3, freq="D")._data,
(np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), pd.NaT.value),
),
],
ids=lambda x: type(x).__name__,
)
def test_invalid_nat_setitem_array(array, non_casting_nats):
msg = (
"value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. "
"Got '(timedelta64|datetime64|int)' instead."
)
for nat in non_casting_nats:
with pytest.raises(TypeError, match=msg):
array[0] = nat
@pytest.mark.parametrize(
"array",
[
pd.date_range("2000", periods=4).array,
pd.timedelta_range("2000", periods=4).array,
],
)
def test_to_numpy_extra(array):
if np_version_under1p18:
# np.isnan(NaT) raises, so use pandas'
isnan = pd.isna
else:
isnan = np.isnan
array[0] = pd.NaT
original = array.copy()
result = array.to_numpy()
assert isnan(result[0])
result = array.to_numpy(dtype="int64")
assert result[0] == -9223372036854775808
result = array.to_numpy(dtype="int64", na_value=0)
assert result[0] == 0
result = array.to_numpy(na_value=array[1].to_numpy())
assert result[0] == result[1]
result = array.to_numpy(na_value=array[1].to_numpy(copy=False))
assert result[0] == result[1]
tm.assert_equal(array, original)
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize(
"values",
[
pd.to_datetime(["2020-01-01", "2020-02-01"]),
TimedeltaIndex([1, 2], unit="D"),
PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
],
)
@pytest.mark.parametrize(
"klass",
[
list,
np.array,
pd.array,
pd.Series,
pd.Index,
pd.Categorical,
pd.CategoricalIndex,
],
)
def test_searchsorted_datetimelike_with_listlike(values, klass, as_index):
# https://github.com/pandas-dev/pandas/issues/32762
if not as_index:
values = values._data
result = values.searchsorted(klass(values))
expected = np.array([0, 1], dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
pd.to_datetime(["2020-01-01", "2020-02-01"]),
TimedeltaIndex([1, 2], unit="D"),
PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
],
)
@pytest.mark.parametrize(
"arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
)
def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):
# https://github.com/pandas-dev/pandas/issues/32762
msg = "[Unexpected type|Cannot compare]"
with pytest.raises(TypeError, match=msg):
values.searchsorted(arg)
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_period_index_construction_from_strings(klass):
# https://github.com/pandas-dev/pandas/issues/26109
strings = ["2020Q1", "2020Q2"] * 2
data = klass(strings)
result = PeriodIndex(data, freq="Q")
expected = PeriodIndex([Period(s) for s in strings])
tm.assert_index_equal(result, expected)
| bsd-3-clause |
thientu/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
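# Editor's addition (illustrative, not part of the original example): the CV
# estimator stores the sparsity level it selected in the fitted attribute
# ``n_nonzero_coefs_``, which can be compared with the true value used above.
print("True number of non-zero coefficients: %d" % n_nonzero_coefs)
print("Number selected by cross-validation: %d" % omp_cv.n_nonzero_coefs_)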
| bsd-3-clause |
dennissergeev/umtools | arke/cart.py | 2 | 14264 | # -*- coding: utf-8 -*-
"""
Collection of functions for cartographic plotting.
"""
import cartopy
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from copy import copy
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
import numpy as np
import shapely.geometry as sgeom
best_ticks = [1, 2, 5, 10, 15, 20, 30, 50]
def _find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)], }
return sgeom.LineString(points[side])
def _lambert_xticks(ax, ticks):
"""Draw ticks on the bottom x-axis of a Lambert Conformal projection."""
    def _te(xy):
        # Tick extractor: the x coordinate of the boundary/gridline intersection.
        return xy[0]
    def _lc(t, n, b):
        # Line constructor: n points along the meridian at longitude t,
        # spanning the map's y-extent (b[2]..b[3]).
        return np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', _lc, _te)
ax.xaxis.tick_bottom()
ax.set_xticks(xticks)
ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick)
for xtick in xticklabels])
def _lambert_yticks(ax, ticks):
"""Draw ticks on the left y-axis of a Lamber Conformal projection."""
    def _te(xy):
        # Tick extractor: the y coordinate of the boundary/gridline intersection.
        return xy[1]
    def _lc(t, n, b):
        # Line constructor: n points along the parallel at latitude t,
        # spanning the map's x-extent (b[0]..b[1]).
        return np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', _lc, _te)
ax.yaxis.tick_left()
ax.set_yticks(yticks)
ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick)
for ytick in yticklabels])
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
"""
Get the tick locations and labels for
an axis of a Lambert Conformal projection.
"""
outline_patch = sgeom.LineString(ax.outline_patch.get_path().
vertices.tolist())
axis = _find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(),
xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
def label_map(ax, toponyms, transform=None, **text_kw):
"""
Put labels on a map
Parameters
----------
ax: cartopy.mpl.geoaxes.GeoAxesSubplot
axes to put names on
toponyms: list
list of dictionaries containing `lon`, `lat`, `name` keys
defining the longitude and latitude of each `name` toponym
transform: matplotlib.transforms.BboxTransformTo, optional
axes transform; set to ax.transAxes by default
"""
if transform is None:
transform = ax.transAxes
for i in toponyms:
txt = ax.text(i['lon'], i['lat'], i['name'],
transform=transform,
**text_kw)
txt.set_zorder(20)
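def _example_label_map(ax):
    # Editor's illustrative sketch, not part of the original module: label a
    # single placeholder toponym on an existing GeoAxes; with the default
    # transform the lon/lat values are axes-fraction coordinates.
    toponyms = [{"lon": 0.25, "lat": 0.75, "name": "Station A"}]
    label_map(ax, toponyms, fontsize=9, color="k")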
def pc_map(fig, subplot_grd=111,
projection=ccrs.PlateCarree(), coast=None, extent=None):
"""
Create axes with the Plate Carree projection in a given figure
Parameters
----------
fig: matplotlib.figure.Figure
matplotlib figure
subplot_grd: int, optional
3-digit integer describing the position of the subplot
default: 111
projection: str or cartopy.crs.CRS, optional
projection class of the axes, default: cartopy.crs.PlateCarree
coast: str or dict, optional
parameters to draw a coastline, see `add_coastline()` for details
extent: sequence, optional
extent (x0, x1, y0, y1) of the map in the given coordinate projection
Returns
-------
cartopy.mpl.geoaxes.GeoAxesSubplot
        axes with the Plate Carree projection
"""
ax = fig.add_subplot(subplot_grd, projection=projection)
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
if isinstance(extent, list):
ax.set_extent(extent, crs=ccrs.PlateCarree())
add_coastline(ax, coast)
return ax
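def _example_pc_map():
    # Editor's illustrative sketch, not part of the original module: a Plate
    # Carree map with an arbitrary extent and a 50m-resolution coastline.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    return pc_map(fig, coast="50m", extent=[-20.0, 40.0, 45.0, 75.0])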
def get_xy_ticks(ticks):
"""Define gridline locations"""
try:
if len(ticks) == 2:
try:
_x, _y = ticks[0], ticks[1]
except KeyError:
_x, _y = ticks['x'], ticks['y']
if (isinstance(_x, (int, float))
and isinstance(_y, (int, float))):
# assume ticks is [xstep, ystep] sequence
xticks = list(np.arange(-180, 181, _x))
yticks = list(np.arange(-90, 91, _y))
elif (isinstance(_x, (tuple, list, np.ndarray))
and isinstance(_y, (tuple, list, np.ndarray))):
# assume ticks is [xticks, yticks] sequence
xticks, yticks = list(_x), list(_y)
except TypeError:
# fall back to default arrays
xticks = list(np.linspace(-180, 180, 37))
yticks = list(np.linspace(-90, 90, 19))
return xticks, yticks
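def _example_get_xy_ticks():
    # Editor's illustrative sketch, not part of the original module: the two
    # accepted forms of `ticks` -- [xstep, ystep] steps, or explicit tick
    # positions keyed by 'x' and 'y'.
    xticks_a, yticks_a = get_xy_ticks([10, 5])
    xticks_b, yticks_b = get_xy_ticks({'x': [-20, 0, 20], 'y': [50, 60, 70]})
    return (xticks_a, yticks_a), (xticks_b, yticks_b)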
def lcc_map(fig, subplot_grd=111, clon=None, clat=None, extent=None,
coast=None, ticks=None):
"""
    Create axes with the Lambert Conformal Conic (LCC) projection in a given figure
Parameters
----------
fig: matplotlib.figure.Figure
matplotlib figure
subplot_grd: int, optional
3-digit integer describing the position of the subplot
default: 111
clon: float
central longitude of LCC projection
clat: float
central latitude of LCC projection
coast: str or dict, optional
parameters to draw a coastline, see `add_coastline()` for details
extent: sequence, optional
extent (x0, x1, y0, y1) of the map in the given coordinate projection
ticks: sequence, optional
see `get_xy_ticks()` for details
Returns
-------
cartopy.mpl.geoaxes.GeoAxesSubplot
axes with the LCC projection
"""
# Create a Lambert Conformal projection:
proj = ccrs.LambertConformal(central_longitude=clon, central_latitude=clat)
# Draw a set of axes with coastlines:
ax = fig.add_subplot(subplot_grd, projection=proj)
if isinstance(extent, list):
ax.set_extent(extent, crs=ccrs.PlateCarree())
add_coastline(ax, coast)
if ticks:
xticks, yticks = get_xy_ticks(ticks)
# *must* call draw in order to get the axis boundary used to add ticks
fig.canvas.draw()
# Draw the lines using cartopy's built-in gridliner
ax.gridlines(xlocs=xticks, ylocs=yticks)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
_lambert_xticks(ax, xticks)
_lambert_yticks(ax, yticks)
return ax
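def _example_lcc_map():
    # Editor's illustrative sketch, not part of the original module: an LCC map
    # centred on an arbitrary point with 10/5 degree gridline steps.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    return lcc_map(fig, clon=15.0, clat=65.0, extent=[-10.0, 40.0, 55.0, 75.0],
                   coast="50m", ticks=[10, 5])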
class GeoAxesGrid(AxesGrid):
"""
A subclass of :class:`mpl_toolkits.axes_grid1.AxesGrid` representing
a grid of maps with the same projection :class:`~cartopy.crs.Projection`.
.. note::
* `axes_class` is defined automatically
* The :class:`AxesGrid` built-in labelling is always switched off,
and instead a standard procedure of creating
grid lines and labels should be used.
"""
def __init__(self, fig, rect, nrows_ncols, projection, **axesgrid_kw):
"""
Build a :class:`GeoAxesGrid` instance with a grid nrows*ncols
:class:`GeoAxes` with a projection :class:`~cartopy.crs.Projection`
in :class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Kwargs:
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
================ ======== =========================================
        *cbar_set_cax* : if True, each axes in the grid has a cax
          attribute that is bound to its associated cbar_axes.
"""
axesgrid_kw['axes_class'] = (GeoAxes, dict(map_projection=projection))
axesgrid_kw['label_mode'] = '' # note the empty label_mode
super(GeoAxesGrid, self).__init__(fig, rect, nrows_ncols,
**axesgrid_kw)
def lcc_map_grid(fig, nrows_ncols, clon, clat, extent=None,
coast=None, ticks=None, **axesgrid_kw):
"""
Build an `AxesGrid` instance with a grid `nrows_ncols` with
the Lambert Conformal Conic (LCC) projection and `**axesgrid_kw` parameters
Parameters
----------
fig: matplotlib.figure.Figure
parent figure
nrows_ncols: tuple of int
N rows and N cols
clon: float
central longitude of LCC projection
clat: float
central latitude of LCC projection
coast: str or dict, optional
parameters to draw a coastline, see `add_coastline()` for details
extent: sequence, optional
extent (x0, x1, y0, y1) of the map in the given coordinate projection
ticks: sequence, optional
see `get_xy_ticks()` for details
Returns
-------
GeoAxesGrid
"""
proj = ccrs.LambertConformal(central_longitude=clon, central_latitude=clat)
axgr = GeoAxesGrid(fig, 111, nrows_ncols, projection=proj, **axesgrid_kw)
if ticks is not None:
xticks, yticks = get_xy_ticks(ticks)
for ax in axgr:
if isinstance(extent, list):
ax.set_extent(extent, crs=ccrs.PlateCarree())
add_coastline(ax, coast)
if ticks is not None:
# *must* call draw in order to get the axis boundary to add ticks
fig.canvas.draw()
# Draw the lines using cartopy's built-in gridliner
ax.gridlines(xlocs=xticks, ylocs=yticks)
# Label the end-points of the gridlines using the tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
_lambert_xticks(ax, xticks)
_lambert_yticks(ax, yticks)
return axgr
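def _example_lcc_map_grid():
    # Editor's illustrative sketch, not part of the original module: a 1x2 grid
    # of LCC maps with a single shared colorbar axes; the AxesGrid keywords are
    # placeholder choices.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    return lcc_map_grid(fig, (1, 2), clon=15.0, clat=65.0,
                        extent=[-10.0, 40.0, 55.0, 75.0], coast="50m",
                        ticks=[10, 5], axes_pad=0.3, cbar_mode="single")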
def merc_map_grid(fig, nrows_ncols, extent=None,
coast=None, ticks=None, **axesgrid_kw):
"""
Build an `AxesGrid` instance with a grid `nrows_ncols` with
the Mercator projection and `**axesgrid_kw` parameters
Parameters
----------
fig: matplotlib.figure.Figure
parent figure
nrows_ncols: tuple of int
N rows and N cols
coast: str or dict, optional
parameters to draw a coastline, see `add_coastline()` for details
extent: sequence, optional
extent (x0, x1, y0, y1) of the map in the given coordinate projection
ticks: sequence, optional
see `get_xy_ticks()` for details
axesgrid_kw: dict, optional
AxesGrid class keywords
Returns
-------
GeoAxesGrid
"""
proj = ccrs.Mercator()
axgr = GeoAxesGrid(fig, 111, nrows_ncols, projection=proj, **axesgrid_kw)
if ticks is not None:
xticks, yticks = get_xy_ticks(ticks)
for ax in axgr:
if isinstance(extent, list):
ax.set_extent(extent, crs=ccrs.PlateCarree())
add_coastline(ax, coast)
if ticks is not None:
gl = ax.gridlines(xlocs=xticks, ylocs=yticks, draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return axgr
def add_coastline(ax, coast):
"""
Add coast outline to a given GeoAxes
Parameters
----------
ax: cartopy.mpl.geoaxes.GeoAxesSubplot
axes to add coastlines
coast: str or dict
        If a str is given, it is assumed to be a named `scale`
        (resolution to use from the Natural Earth dataset,
        currently one of "110m", "50m", and "10m").
        If a dict is given, it is assumed to contain the scale, as well as
        other kwargs of `cartopy.feature.NaturalEarthFeature`.
"""
if isinstance(coast, str):
feature = cartopy.feature.NaturalEarthFeature(name='coastline',
category='physical',
scale=coast,
edgecolor='#AAAAAA',
facecolor='#AAAAAA')
ax.add_feature(feature)
elif isinstance(coast, dict):
feature = cartopy.feature.NaturalEarthFeature(name='coastline',
category='physical',
**coast)
ax.add_feature(feature)
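def _example_add_coastline(ax):
    # Editor's illustrative sketch, not part of the original module: the two
    # accepted forms of `coast` -- a bare scale string, or a dict of
    # NaturalEarthFeature keyword arguments.
    add_coastline(ax, "110m")
    add_coastline(ax, {"scale": "50m", "edgecolor": "0.4", "facecolor": "none"})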
| mit |
mantidproject/mantid | qt/python/mantidqt/project/plotsloader.py | 3 | 17406 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
import copy
import matplotlib.axes
import matplotlib.cm as cm
import matplotlib.colors
from matplotlib import axis, ticker # noqa
from matplotlib.ticker import NullFormatter,\
ScalarFormatter, LogFormatterSciNotation
from mantid import logger
from mantid.api import AnalysisDataService as ADS
from mantid.plots.legend import LegendProperties
from mantid.plots.plotfunctions import create_subplots
# Constants set in workbench.plotting.functions but would cause backwards reliability
from mantidqt.plotting.functions import pcolormesh
SUBPLOT_WSPACE = 0.5
SUBPLOT_HSPACE = 0.5
TICK_FORMATTERS = {"NullFormatter": NullFormatter(),
"ScalarFormatter": ScalarFormatter(useOffset=True),
"LogFormatterSciNotation": LogFormatterSciNotation()
}
def get_tick_format(tick_formatters: dict, tick_formatter: str,
tick_format):
if tick_formatter == "FixedFormatter":
fmt = ticker.FixedFormatter(tick_format)
else:
try:
fmt = tick_formatters[tick_formatter]
except KeyError:
# If the formatter is not FixedFormatter or
            # does not exist in tick_formatters,
# default to ScalarFormatter
fmt = tick_formatters["ScalarFormatter"]
return fmt
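def _example_get_tick_format():
    # Editor's illustrative sketch, not part of the original module: a
    # FixedFormatter is rebuilt from its saved labels, while an unrecognised
    # formatter name falls back to the ScalarFormatter entry.
    fixed = get_tick_format(TICK_FORMATTERS, "FixedFormatter", ["0", "1", "2"])
    fallback = get_tick_format(TICK_FORMATTERS, "SomeUnknownFormatter", None)
    return fixed, fallback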
class PlotsLoader(object):
def __init__(self):
self.color_bar_remade = False
def load_plots(self, plots_list):
if plots_list is None:
return
for plot_ in plots_list:
try:
self.make_fig(plot_)
except BaseException as e:
# Catch all errors in here so it can fail silently-ish
if isinstance(e, KeyboardInterrupt):
raise KeyboardInterrupt(str(e))
logger.warning("A plot was unable to be loaded from the save file. Error: " + str(e))
def restore_normalise_obj_from_dict(self, norm_dict):
supported_norm_types = {
# matplotlib norms that are supported.
'Normalize': matplotlib.colors.Normalize,
'LogNorm': matplotlib.colors.LogNorm,
}
# If there is a norm dict, but the type is not specified, default to base Normalize class.
type = norm_dict['type'] if 'type' in norm_dict.keys() else 'Normalize'
if type not in supported_norm_types.keys():
logger.debug(
f"Color normalisation of type {norm_dict['type']} is not supported. Normalisation will not be set on this plot")
return None
norm = supported_norm_types[type]
return norm(vmin=norm_dict['vmin'], vmax=norm_dict['vmax'], clip=norm_dict['clip'])
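    # Editor's note (illustrative): the ``norm_dict`` handled above is expected
    # to look like {"type": "LogNorm", "vmin": 0.1, "vmax": 100.0, "clip": False};
    # the numeric values shown here are placeholders.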
def make_fig(self, plot_dict, create_plot=True):
"""
This method currently only considers single matplotlib.axes.Axes based figures as that is the most common case
:param plot_dict: dictionary; A dictionary of various items intended to recreate a figure
:param create_plot: Bool; whether or not to make the plot, or to return the figure.
:return: matplotlib.figure; Only returns if create_plot=False
""" # Grab creation arguments
creation_args = plot_dict["creationArguments"]
if len(creation_args) == 0:
logger.information(
"A plot could not be loaded from the save file, as it did not have creation_args. "
"The original plot title was: {}".format(plot_dict["label"]))
return
for sublist in creation_args:
for cargs_dict in sublist:
if 'norm' in cargs_dict and type(cargs_dict['norm']) is dict:
cargs_dict['norm'] = self.restore_normalise_obj_from_dict(cargs_dict['norm'])
fig, axes_matrix, _, _ = create_subplots(len(creation_args))
axes_list = axes_matrix.flatten().tolist()
for ax, cargs_list in zip(axes_list, creation_args):
creation_args_copy = copy.deepcopy(cargs_list)
for cargs in cargs_list:
if "workspaces" in cargs:
workspace_name = cargs.pop("workspaces")
workspace = ADS.retrieve(workspace_name)
self.workspace_plot_func(workspace, ax, ax.figure, cargs)
elif "function" in cargs:
self.plot_func(ax, cargs)
for cargs in creation_args_copy:
cargs.pop('normalize_by_bin_width', None)
ax.creation_args = creation_args_copy
# Update the fig
fig._label = plot_dict["label"]
fig.canvas.set_window_title(plot_dict["label"])
self.restore_figure_data(fig=fig, dic=plot_dict)
# If the function should create plot then create else return
if create_plot:
fig.show()
else:
return fig
def workspace_plot_func(self, workspace, axes, fig, creation_arg):
"""
        Plots the graph from the given workspace, axes and creation_arg, then returns the function used to create it.
:param workspace: mantid.Workspace; Workspace to create the graph from
:param axes: matplotlib.Axes; Axes to create the graph
:param fig: matplotlib.Figure; Figure to add the colormesh to
        :param creation_arg: The creation arguments that were used to create the plot
:return: String; The function used to create the plot
"""
        # Pop the "function" kwarg to determine which plotting function to call
function_to_call = creation_arg.pop("function")
# Handle recreating the cmap objects
if "cmap" in creation_arg:
creation_arg["cmap"] = getattr(matplotlib.cm, creation_arg["cmap"])
function_dict = {
"plot": axes.plot,
"scatter": axes.scatter,
"errorbar": axes.errorbar,
"pcolor": axes.pcolor,
"pcolorfast": axes.pcolorfast,
"pcolormesh": pcolormesh,
"imshow": pcolormesh,
"contour": axes.contour,
"contourf": axes.contourf,
"tripcolor": axes.tripcolor,
"tricontour": axes.tricontour,
"tricontourf": axes.tricontourf
}
func = function_dict[function_to_call]
# Plotting is done via an Axes object unless a colorbar needs to be added
if function_to_call in ["imshow", "pcolormesh"]:
func([workspace], fig, color_norm=creation_arg['norm'], normalize_by_bin_width=creation_arg['normalize_by_bin_width'])
self.color_bar_remade = True
else:
func(workspace, **creation_arg)
def plot_func(self, axes, creation_arg):
"""
Calls plotting functions that aren't associated with workspaces, such as axhline and axvline.
:param axes: matplotlib.Axes; Axes to call the function on
        :param creation_arg: The function's arguments when it was originally called.
"""
function_to_call = creation_arg.pop('function')
function_dict = {
"axhline": axes.axhline,
"axvline": axes.axvline
}
func = function_dict[function_to_call]
func(*creation_arg['args'], **creation_arg['kwargs'])
def restore_figure_data(self, fig, dic):
self.restore_fig_properties(fig, dic["properties"])
axes_list = dic["axes"]
for index, ax in enumerate(fig.axes):
try:
self.restore_fig_axes(ax, axes_list[index])
except IndexError as e:
if not self.color_bar_remade:
raise IndexError(e)
except KeyError:
logger.notice("Not adding data to blank axis.")
@staticmethod
def restore_fig_properties(fig, dic):
fig.set_figheight(dic["figHeight"])
fig.set_figwidth(dic["figWidth"])
fig.set_dpi(dic["dpi"])
def restore_fig_axes(self, ax, dic):
# Restore axis properties
properties = dic["properties"]
self.update_properties(ax, properties)
# Set the titles
if dic["xAxisTitle"] is not None:
ax.set_xlabel(dic["xAxisTitle"])
if dic["yAxisTitle"] is not None:
ax.set_ylabel(dic["yAxisTitle"])
if dic["title"] is not None:
ax.set_title(dic["title"])
# Update the lines
line_list = dic["lines"]
for line in line_list:
self.update_lines(ax, line)
# Update/set text
if "texts" in dic:
for text_ in dic["texts"]:
self.create_text_from_dict(ax, text_)
# Update artists that are text
if "textFromArtists" in dic:
for artist in dic["textFromArtists"]:
self.create_text_from_dict(ax, artist)
# Update Legend
self.update_legend(ax, dic["legend"])
# Update colorbar if present
if self.color_bar_remade and dic["colorbar"]["exists"]:
if len(ax.images) > 0:
image = ax.images[0]
elif len(ax.collections) > 0:
                image = ax.collections[0]
else:
raise RuntimeError("self.color_bar_remade set to True whilst no colorbar found")
self.update_colorbar_from_dict(image, dic["colorbar"])
@staticmethod
def create_text_from_dict(ax, dic):
style_dic = dic["style"]
ax.text(x=dic["position"][0],
y=dic["position"][1],
s=dic["text"],
fontdict={
u'alpha': style_dic["alpha"],
u'color': style_dic["color"],
u'rotation': style_dic["rotation"],
u'fontsize': style_dic["textSize"],
u'zorder': style_dic["zOrder"],
u'usetex': dic["useTeX"],
u'horizontalalignment': style_dic["hAlign"],
u'verticalalignment': style_dic["vAlign"]
})
@staticmethod
def update_lines(ax, line_settings):
# update current line setting with settings from file
def update_line_setting(line_update_method, settings, setting):
if setting in settings:
line_update_method(settings[setting])
# get current line and update settings
current_line = ax.lines[line_settings["lineIndex"]]
update_line_setting(current_line.set_label, line_settings, "label")
update_line_setting(current_line.set_alpha, line_settings, "alpha")
update_line_setting(current_line.set_color, line_settings, "color")
update_line_setting(current_line.set_linestyle, line_settings, "lineStyle")
update_line_setting(current_line.set_linewidth, line_settings, "lineWidth")
marker_style = line_settings["markerStyle"]
update_line_setting(current_line.set_markerfacecolor, marker_style, "faceColor")
update_line_setting(current_line.set_markeredgecolor, marker_style, "edgeColor")
update_line_setting(current_line.set_markeredgewidth, marker_style, "edgeWidth")
update_line_setting(current_line.set_marker, marker_style, "markerType")
update_line_setting(current_line.set_markersize, marker_style, "markerSize")
update_line_setting(current_line.set_zorder, marker_style, "zOrder")
errorbar_style = line_settings["errorbars"]
if errorbar_style["exists"]:
update_line_setting(current_line.set_dash_capstyle, errorbar_style, "dashCapStyle")
update_line_setting(current_line.set_dash_joinstyle, errorbar_style, "dashJoinStyle")
update_line_setting(current_line.set_solid_capstyle, errorbar_style, "solidCapStyle")
update_line_setting(current_line.set_solid_joinstyle, errorbar_style, "solidJoinStyle")
@staticmethod
def update_legend(ax, legend):
if not legend["exists"] and ax.get_legend():
ax.get_legend().remove()
return
if legend["exists"]:
LegendProperties.create_legend(legend, ax)
def update_properties(self, ax, properties):
        # Support for additional plot options accessible from general settings
if "tickParams" in properties.keys():
ax.xaxis.set_tick_params(which="major", **properties["tickParams"]["xaxis"]["major"])
ax.xaxis.set_tick_params(which="minor", **properties["tickParams"]["xaxis"]["minor"])
ax.yaxis.set_tick_params(which="major", **properties["tickParams"]["yaxis"]["major"])
ax.yaxis.set_tick_params(which="minor", **properties["tickParams"]["yaxis"]["minor"])
if properties["bounds"]:
ax.set_position(properties["bounds"])
ax.set_navigate(properties["dynamic"])
ax.axison = properties["axisOn"]
ax.set_frame_on(properties["frameOn"])
ax.set_visible(properties["visible"])
ax.set_xscale(properties["xAxisScale"])
ax.set_yscale(properties["yAxisScale"])
if "xAutoScale" in properties and properties["xAutoScale"]:
ax.autoscale(True, axis="x")
else:
ax.set_xlim(properties["xLim"])
if "yAutoScale" in properties and properties["yAutoScale"]:
ax.autoscale(True, axis="y")
else:
ax.set_ylim(properties["yLim"])
ax.show_minor_gridlines = properties["showMinorGrid"]
# Update X Axis
if "xAxisProperties" in properties:
self.update_axis(ax.xaxis, properties["xAxisProperties"])
# Update Y Axis
if "yAxisProperties" in properties:
self.update_axis(ax.yaxis, properties["yAxisProperties"])
if 'spineWidths' in properties:
for (spine, width) in properties['spineWidths'].items():
ax.spines[spine].set_linewidth(width)
def update_axis(self, axis_, properties):
if "position" in properties.keys():
# Support for older .mtdproj files that did not include additional
# plot settings introduced in PR #30121
if isinstance(axis_, matplotlib.axis.XAxis):
if properties["position"] == "top":
axis_.tick_top()
else:
axis_.tick_bottom()
if isinstance(axis_, matplotlib.axis.YAxis):
if properties["position"] == "right":
axis_.tick_right()
else:
axis_.tick_left()
labels = axis_.get_ticklabels()
if properties["fontSize"] != "":
for label in labels:
label.set_fontsize(properties["fontSize"])
axis_.set_visible(properties["visible"])
# Set axis tick data
self.update_axis_ticks(axis_, properties)
# Set grid data
self.update_grid_style(axis_, properties)
@staticmethod
def update_grid_style(axis_, properties):
grid_dict = properties["gridStyle"]
grid_lines = axis_.get_gridlines()
if grid_dict["gridOn"]:
which = 'both' if grid_dict["minorGridOn"] else "major"
axis_.axes.grid(True, axis=axis_.axis_name, which=which)
for grid_line in grid_lines:
grid_line.set_alpha(grid_dict["alpha"])
grid_line.set_color(grid_dict["color"])
@staticmethod
def update_axis_ticks(axis_, properties):
# Update Major and Minor Locator
if properties["majorTickLocator"] == "FixedLocator":
axis_.set_major_locator(ticker.FixedLocator(properties["majorTickLocatorValues"]))
if properties["minorTickLocator"] == "FixedLocator":
axis_.set_minor_locator(ticker.FixedLocator(properties["minorTickLocatorValues"]))
elif properties["minorTickLocator"] == "AutoMinorLocator":
axis_.set_minor_locator(ticker.AutoMinorLocator())
# Update Major and Minor TickFormatter
fmt = get_tick_format(TICK_FORMATTERS,
properties["majorTickFormatter"],
properties["majorTickFormat"])
axis_.set_major_formatter(fmt)
fmt = get_tick_format(TICK_FORMATTERS,
properties["minorTickFormatter"],
properties["minorTickFormat"])
axis_.set_minor_formatter(fmt)
@staticmethod
def update_colorbar_from_dict(image, dic):
# colorbar = image.colorbar
image.set_clim(*sorted([dic["min"], dic["max"]]))
image.set_label(dic["label"])
image.set_cmap(cm.get_cmap(dic["cmap"]))
image.set_interpolation(dic["interpolation"])
        # Try to make the cmap line up, but sometimes it won't
try:
image.axes.set_cmap(cm.get_cmap(dic["cmap"]))
except AttributeError as e:
logger.debug(
"PlotsLoader - The Image accessed did not have an axes with the ability to set the cmap: "
+ str(e))
# Redraw
image.axes.figure.canvas.draw()
| gpl-3.0 |
bootandy/Axelrod | axelrod/tournament_manager.py | 1 | 6755 | from __future__ import absolute_import, unicode_literals, print_function
import os
import cloudpickle as pickle
from .tournament import *
from .plot import *
from .ecosystem import *
from .utils import *
class TournamentManager(object):
def __init__(self, output_directory, with_ecological,
pass_cache=True, load_cache=True, save_cache=False,
cache_file='./cache.txt'):
self._tournaments = []
self._ecological_variants = []
self._logger = logging.getLogger(__name__)
self._output_directory = output_directory
self._with_ecological = with_ecological
self._pass_cache = pass_cache
self._save_cache = save_cache
self._cache_file = cache_file
self._deterministic_cache = {}
self._cache_valid_for_turns = None
self._load_cache = False
if load_cache and not save_cache:
            self._load_cache = self._load_cache_from_file(cache_file)
@staticmethod
def one_player_per_strategy(strategies):
return [strategy() for strategy in strategies]
def add_tournament(self, name, players, game=None, turns=200,
repetitions=10, processes=None, noise=0,
with_morality=True):
tournament = Tournament(
name=name,
players=players,
turns=turns,
repetitions=repetitions,
processes=processes,
noise=noise,
with_morality=with_morality)
self._tournaments.append(tournament)
def run_tournaments(self):
t0 = time.time()
for tournament in self._tournaments:
self._run_single_tournament(tournament)
if self._save_cache and not tournament.noise:
self._save_cache_to_file(self._deterministic_cache, self._cache_file)
self._logger.info(timed_message('Finished all tournaments', t0))
def _run_single_tournament(self, tournament):
self._logger.info(
'Starting %s tournament with %d round robins of %d turns per pair.'
% (tournament.name, tournament.repetitions, tournament.turns))
t0 = time.time()
if not tournament.noise and self._pass_cache and self._valid_cache(tournament.turns):
self._logger.debug('Passing cache with %d entries to %s tournament' %
(len(self._deterministic_cache), tournament.name))
tournament.deterministic_cache = self._deterministic_cache
if self._load_cache:
tournament.prebuilt_cache = True
else:
self._logger.debug('Cache is not valid for %s tournament' %
tournament.name)
tournament.play()
self._logger.debug(timed_message('Finished %s tournament' % tournament.name, t0))
if self._with_ecological:
ecosystem = Ecosystem(tournament.result_set)
self.run_ecological_variant(tournament, ecosystem)
else:
ecosystem = None
self._generate_output_files(tournament, ecosystem)
self._cache_valid_for_turns = tournament.turns
self._logger.debug('Cache now has %d entries' %
len(self._deterministic_cache))
self._logger.info(
timed_message('Finished all %s tasks' % tournament.name, t0))
def _valid_cache(self, turns):
return ((len(self._deterministic_cache) == 0) or
(len(self._deterministic_cache) > 0) and
turns == self._cache_valid_for_turns)
def run_ecological_variant(self, tournament, ecosystem):
self._logger.debug(
'Starting ecological variant of %s' % tournament.name)
t0 = time.time()
ecoturns = {
'basic_strategies': 1000,
'cheating_strategies': 10,
'strategies': 1000,
'all_strategies': 10,
}
ecosystem.reproduce(ecoturns.get(tournament.name))
self._logger.debug(
timed_message('Finished ecological variant of %s' % tournament.name, t0))
def _generate_output_files(self, tournament, ecosystem=None):
self._save_csv(tournament)
self._save_plots(tournament, ecosystem)
def _save_csv(self, tournament):
csv = tournament.result_set.csv()
file_name = self._output_file_path(
tournament.name, 'csv')
with open(file_name, 'w') as f:
f.write(csv)
def _save_plots(self, tournament, ecosystem=None, image_format="svg"):
results = tournament.result_set
plot = Plot(results)
if not plot.matplotlib_installed:
self._logger.error('The matplotlib library is not installed. '
'No plots will be produced')
return
for plot_type in ('boxplot', 'payoff', 'winplot', 'sdvplot', 'pdplot'):
figure = getattr(plot, plot_type)()
file_name = self._output_file_path(
tournament.name + '_' + plot_type, image_format)
self._save_plot(figure, file_name)
if ecosystem is not None:
figure = plot.stackplot(ecosystem)
file_name = self._output_file_path(
tournament.name + '_reproduce', image_format)
self._save_plot(figure, file_name)
def _output_file_path(self, file_name, file_extension):
return os.path.join(
self._output_directory,
file_name + '.' + file_extension)
@staticmethod
def _save_plot(figure, file_name, dpi=400):
figure.savefig(file_name, bbox_inches='tight', dpi=dpi)
figure.clf()
plt.close(figure)
def _save_cache_to_file(self, cache, file_name):
self._logger.debug(
'Saving cache with %d entries to %s' % (len(cache), file_name))
deterministic_cache = DeterministicCache(
cache, self._cache_valid_for_turns)
with open(file_name, 'wb') as io:
pickle.dump(deterministic_cache, io)
return True
def _load_cache_from_file(self, file_name):
try:
with open(file_name, 'rb') as io:
deterministic_cache = pickle.load(io)
self._deterministic_cache = deterministic_cache.cache
self._cache_valid_for_turns = deterministic_cache.turns
self._logger.debug(
'Loaded cache with %d entries' % len(self._deterministic_cache))
return True
except IOError:
self._logger.debug('Cache file not found. Starting with empty cache')
return False
class DeterministicCache(object):
def __init__(self, cache, turns):
self.cache = cache
self.turns = turns
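def _example_tournament_manager(players):
    # Editor's illustrative sketch, not part of the original module: build a
    # manager, register one short tournament and run it; the output directory
    # and tournament parameters below are placeholders.
    manager = TournamentManager(output_directory="./assets", with_ecological=False)
    manager.add_tournament(name="demo", players=players, turns=10, repetitions=2)
    manager.run_tournaments()
    return manager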
| mit |
dsm054/pandas | pandas/tests/groupby/test_rank.py | 1 | 12571 | import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame({'value': np.random.randn(500),
'key1': lev1.take(lab1),
'key2': lev2.take(lab2)})
result = df.groupby(['key1', 'key2']).value.rank()
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank())
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(['key1', 'key2']).value.rank(pct=True)
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank(pct=True))
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[2, 2, 8, 2, 6],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06')]])
@pytest.mark.parametrize("ties_method,ascending,pct,exp", [
('average', True, False, [2., 2., 5., 2., 4.]),
('average', True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
('average', False, False, [4., 4., 1., 4., 2.]),
('average', False, True, [.8, .8, .2, .8, .4]),
('min', True, False, [1., 1., 5., 1., 4.]),
('min', True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
('min', False, False, [3., 3., 1., 3., 2.]),
('min', False, True, [.6, .6, .2, .6, .4]),
('max', True, False, [3., 3., 5., 3., 4.]),
('max', True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
('max', False, False, [5., 5., 1., 5., 2.]),
('max', False, True, [1., 1., .2, 1., .4]),
('first', True, False, [1., 2., 5., 3., 4.]),
('first', True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
('first', False, False, [3., 4., 1., 5., 2.]),
('first', False, True, [.6, .8, .2, 1., .4]),
('dense', True, False, [1., 1., 3., 1., 2.]),
('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]),
('dense', False, False, [3., 3., 1., 3., 2.]),
('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]),
])
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[-np.inf, -np.inf, np.nan, 1., np.nan, np.inf, np.inf],
])
@pytest.mark.parametrize("ties_method,ascending,na_option,exp", [
('average', True, 'keep', [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
('average', True, 'top', [3.5, 3.5, 1.5, 5., 1.5, 6.5, 6.5]),
('average', True, 'bottom', [1.5, 1.5, 6.5, 3., 6.5, 4.5, 4.5]),
('average', False, 'keep', [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
('average', False, 'top', [6.5, 6.5, 1.5, 5., 1.5, 3.5, 3.5]),
('average', False, 'bottom', [4.5, 4.5, 6.5, 3., 6.5, 1.5, 1.5]),
('min', True, 'keep', [1., 1., np.nan, 3., np.nan, 4., 4.]),
('min', True, 'top', [3., 3., 1., 5., 1., 6., 6.]),
('min', True, 'bottom', [1., 1., 6., 3., 6., 4., 4.]),
('min', False, 'keep', [4., 4., np.nan, 3., np.nan, 1., 1.]),
('min', False, 'top', [6., 6., 1., 5., 1., 3., 3.]),
('min', False, 'bottom', [4., 4., 6., 3., 6., 1., 1.]),
('max', True, 'keep', [2., 2., np.nan, 3., np.nan, 5., 5.]),
('max', True, 'top', [4., 4., 2., 5., 2., 7., 7.]),
('max', True, 'bottom', [2., 2., 7., 3., 7., 5., 5.]),
('max', False, 'keep', [5., 5., np.nan, 3., np.nan, 2., 2.]),
('max', False, 'top', [7., 7., 2., 5., 2., 4., 4.]),
('max', False, 'bottom', [5., 5., 7., 3., 7., 2., 2.]),
('first', True, 'keep', [1., 2., np.nan, 3., np.nan, 4., 5.]),
('first', True, 'top', [3., 4., 1., 5., 2., 6., 7.]),
('first', True, 'bottom', [1., 2., 6., 3., 7., 4., 5.]),
('first', False, 'keep', [4., 5., np.nan, 3., np.nan, 1., 2.]),
('first', False, 'top', [6., 7., 1., 5., 2., 3., 4.]),
('first', False, 'bottom', [4., 5., 6., 3., 7., 1., 2.]),
('dense', True, 'keep', [1., 1., np.nan, 2., np.nan, 3., 3.]),
('dense', True, 'top', [2., 2., 1., 3., 1., 4., 4.]),
('dense', True, 'bottom', [1., 1., 4., 2., 4., 3., 3.]),
('dense', False, 'keep', [3., 3., np.nan, 2., np.nan, 1., 1.]),
('dense', False, 'top', [4., 4., 1., 3., 1., 2., 2.]),
('dense', False, 'bottom', [3., 3., 4., 2., 4., 1., 1.])
])
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06'), np.nan, np.nan]
])
@pytest.mark.parametrize("ties_method,ascending,na_option,pct,exp", [
('average', True, 'keep', False,
[2., 2., np.nan, 5., 2., 4., np.nan, np.nan]),
('average', True, 'keep', True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan]),
('average', False, 'keep', False,
[4., 4., np.nan, 1., 4., 2., np.nan, np.nan]),
('average', False, 'keep', True,
[.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan]),
('min', True, 'keep', False,
[1., 1., np.nan, 5., 1., 4., np.nan, np.nan]),
('min', True, 'keep', True,
[0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
('min', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('min', False, 'keep', True,
[.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
('max', True, 'keep', False,
[3., 3., np.nan, 5., 3., 4., np.nan, np.nan]),
('max', True, 'keep', True,
[0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
('max', False, 'keep', False,
[5., 5., np.nan, 1., 5., 2., np.nan, np.nan]),
('max', False, 'keep', True,
[1., 1., np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
('first', True, 'keep', False,
[1., 2., np.nan, 5., 3., 4., np.nan, np.nan]),
('first', True, 'keep', True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
('first', False, 'keep', False,
[3., 4., np.nan, 1., 5., 2., np.nan, np.nan]),
('first', False, 'keep', True,
[.6, 0.8, np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
('dense', True, 'keep', False,
[1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
('dense', True, 'keep', True,
[1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]),
('dense', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('dense', False, 'keep', True,
[3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. / 3., np.nan, np.nan]),
('average', True, 'bottom', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
('average', True, 'bottom', True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
('average', False, 'bottom', False, [4., 4., 7., 1., 4., 2., 7., 7.]),
('average', False, 'bottom', True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875]),
('min', True, 'bottom', False, [1., 1., 6., 5., 1., 4., 6., 6.]),
('min', True, 'bottom', True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75]),
('min', False, 'bottom', False, [3., 3., 6., 1., 3., 2., 6., 6.]),
('min', False, 'bottom', True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75]),
('max', True, 'bottom', False, [3., 3., 8., 5., 3., 4., 8., 8.]),
('max', True, 'bottom', True,
[0.375, 0.375, 1., 0.625, 0.375, 0.5, 1., 1.]),
('max', False, 'bottom', False, [5., 5., 8., 1., 5., 2., 8., 8.]),
('max', False, 'bottom', True,
[0.625, 0.625, 1., 0.125, 0.625, 0.25, 1., 1.]),
('first', True, 'bottom', False, [1., 2., 6., 5., 3., 4., 7., 8.]),
('first', True, 'bottom', True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.]),
('first', False, 'bottom', False, [3., 4., 6., 1., 5., 2., 7., 8.]),
('first', False, 'bottom', True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
('dense', True, 'bottom', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
('dense', True, 'bottom', True,
[0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]),
('dense', False, 'bottom', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
('dense', False, 'bottom', True,
[0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.])
])
def test_rank_args_missing(grps, vals, ties_method, ascending,
na_option, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("pct,exp", [
(False, [3., 3., 3., 3., 3.]),
(True, [.6, .6, .6, .6, .6])])
def test_rank_resets_each_group(pct, exp):
df = DataFrame(
{'key': ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'],
'val': [1] * 10}
)
result = df.groupby('key').rank(pct=pct)
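    # all values within a group tie, so each row gets the group-average rank 3, i.e. 3/5 = 0.6 with pct=True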
exp_df = DataFrame(exp * 2, columns=['val'])
tm.assert_frame_equal(result, exp_df)
def test_rank_avg_even_vals():
df = DataFrame({'key': ['a'] * 4, 'val': [1] * 4})
result = df.groupby('key').rank()
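    # four tied values share the average of ranks 1-4: (1 + 2 + 3 + 4) / 4 = 2.5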
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("ties_method", [
'average', 'min', 'max', 'first', 'dense'])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize("vals", [
['bar', 'bar', 'foo', 'bar', 'baz'],
['bar', np.nan, 'foo', np.nan, 'baz']
])
def test_rank_object_raises(ties_method, ascending, na_option,
pct, vals):
df = DataFrame({'key': ['foo'] * 5, 'val': vals})
with pytest.raises(TypeError, match="not callable"):
df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
@pytest.mark.parametrize("na_option", [True, "bad", 1])
@pytest.mark.parametrize("ties_method", [
'average', 'min', 'max', 'first', 'dense'])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize("vals", [
['bar', 'bar', 'foo', 'bar', 'baz'],
['bar', np.nan, 'foo', np.nan, 'baz'],
[1, np.nan, 2, np.nan, 3]
])
def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):
df = DataFrame({'key': ['foo'] * 5, 'val': vals})
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
def test_rank_empty_group():
# see gh-22519
column = "A"
df = DataFrame({
"A": [0, 1, 0],
"B": [1., np.nan, 2.]
})
result = df.groupby(column).B.rank(pct=True)
expected = Series([0.5, np.nan, 1.0], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby(column).rank(pct=True)
expected = DataFrame({"B": [0.5, np.nan, 1.0]})
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
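    # a single-parameter grid simply expands to [{'foo': 1}, {'foo': 2}, {'foo': 3}]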
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
cauchycui/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
iansf/pstar | setup.py | 1 | 1296 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
def readme():
return '`pstar` documentation and source code can be found at https://github.com/iansf/pstar.'
def version():
return '0.1.9'
setuptools.setup(
name='pstar',
description='pstar: numpy for arbitrary data',
long_description=readme(),
long_description_content_type='text/markdown',
version=version(),
url='https://github.com/iansf/pstar',
download_url='https://github.com/iansf/pstar/archive/%s.tar.gz' % version(),
author='Ian Fischer, Google',
author_email='[email protected]',
packages=['pstar'],
license='Apache 2.0',
install_requires=['qj'],
test_suite='nose.collector',
tests_require=['matplotlib', 'mock', 'nose'],
)
| apache-2.0 |
bluemonk482/emotionannotate | src/EmbeddingFeature.py | 1 | 2604 | from sklearn.base import BaseEstimator
import re
import os
import numpy as np
import pandas as pd
import gensim
class streamdata(object):
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
for line in open(os.path.join(self.dirname, fname)):
yield line.rstrip().split('\t')
def readTang(fname='../word2vec/sswe'):
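    # each line of the embedding file is assumed to be tab-separated: '<word>\t<v1>\t<v2> ...'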
embs = streamdata(fname)
embedmodel = {}
for tw2vec in embs:
wd = tw2vec[0]
value = [float(i) for i in tw2vec[1:]]
embedmodel[wd] = np.array(value)
return embedmodel
class EmbeddingVectorizer(BaseEstimator):
def __init__(self, w2vf='../word2vec/w2v/c10_w3_s100', sswef='../word2vec/sswe'):
self.w2v = gensim.models.Word2Vec.load(w2vf)
self.sswe = readTang(sswef)
def emdsswe(self, uni):
f = np.array([])
f = self.sswe.get(uni, self.sswe['<unk>'])
return f
def emdw2v(self, uni):
f = np.array([])
try:
f = self.w2v[uni]
except:
pass
return f
def concattw(self, feature, size, tw, etype):
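        # stack the per-token embeddings into an (n_tokens, size) matrix, then append its
        # column-wise sum, max and mean, i.e. 3*size pooled features per embedding type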
feat = np.array([])
for i, uni in enumerate(tw):
if etype == 'w2v':
f = self.emdw2v(uni)
if etype == 'sswe':
f = self.emdsswe(uni)
feat = np.concatenate([feat, f])
if list(feat) == []:
feat = np.zeros((2*size,))
if len(feat) <= size:
feat = np.concatenate([feat, np.zeros((size,))])
        feat = feat.reshape(len(feat)//size, size)
feature = np.concatenate([feature, feat.sum(axis=0)])
feature = np.concatenate([feature, feat.max(axis=0)])
# feature = np.concatenate([feature, feat.min(axis=0)])
feature = np.concatenate([feature, feat.mean(axis=0)])
# feature = np.concatenate([feature, feat.std(axis=0)])
# feature = np.concatenate([feature, feat.prod(axis=0)])
return feature
def fit(self, documents, y=None):
return self
def transform(self, documents):
x = np.array([])
size1 = len(self.w2v['the'])
size2 = len(self.sswe['the'])
for tweet in documents:
d = tweet.lower()
tw = d.split()
feature = np.array([])
feature = self.concattw(feature, size1, tw, 'w2v')
feature = self.concattw(feature, size2, tw, 'sswe')
x = np.concatenate([x, feature])
        x = x.reshape((len(documents), len(x)//len(documents)))
return x
| gpl-3.0 |
mutirri/bokeh | examples/app/stock_applet/stock_app_simple.py | 43 | 12408 | """
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import (HBox, VBox, VBoxForm, PreText,
Select, AppHBox, AppVBox, AppVBoxForm)
from bokeh.simpleapp import simpleapp
select1 = Select(name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
select2 = Select(name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
@simpleapp(select1, select2)
def stock(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
row1 = HBox(children=[p, pretext])
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
row2 = HBox(children=[hist1, hist2])
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
output = VBox(children=[row1, row2, line1, line2])
return output
stock.route("/bokeh/stocks/")
@simpleapp(select1, select2)
def stock2(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
return dict(scatterplot=p,
statstext=pretext,
hist1=hist1,
hist2=hist2,
line1=line1,
line2=line2)
@stock2.layout
def stock2_layout(app):
widgets = AppVBoxForm(app=app, children=['ticker1', 'ticker2'])
row1 = AppHBox(app=app, children=['scatterplot', 'statstext'])
row2 = AppHBox(app=app, children=['hist1', 'hist2'])
all_plots = AppVBox(app=app, children=[row1, row2, 'line1', 'line2'])
app = AppHBox(app=app, children=[widgets, all_plots])
return app
@stock2.update(['ticker1', 'ticker2'])
def stock2_update_input(ticker1, ticker2, app):
return stock2(ticker1, ticker2)
@stock2.update([({'tags' : 'main_source'}, ['selected'])])
def stock2_update_selection(ticker1, ticker2, app):
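    # fires whenever the 'selected' property of the source tagged 'main_source' changes:
    # recompute the summary statistics and both histograms on the currently selected rows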
source = app.select_one({'tags' : 'main_source'})
df = get_data(ticker1, ticker2)
if source.selected:
selected_df = df.iloc[source.selected['1d']['indices'], :]
else:
selected_df = df
stats_text = app.objects['statstext']
stats_text.text = str(selected_df.describe())
return {
'hist1': hist_plot(df, ticker1, selected_df=selected_df),
'hist2': hist_plot(df, ticker2, selected_df=selected_df),
'statstext': stats_text,
}
stock2.route("/bokeh/stocks2/")
def hist_plot(df, ticker, selected_df=None):
if selected_df is None:
selected_df = df
global_hist, global_bins = np.histogram(df[ticker + "_returns"], bins=50)
hist, bins = np.histogram(selected_df[ticker + "_returns"], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
start = global_bins.min()
end = global_bins.max()
top = hist.max()
p = figure(
title="%s hist" % ticker,
plot_width=500, plot_height=200,
tools="",
title_text_font_size="10pt",
x_range=[start, end],
y_range=[0, top],
)
p.rect(center, hist / 2.0, width, hist)
return p
def line_plot(ticker, source, x_range=None):
p = figure(
title=ticker,
x_range=x_range,
x_axis_type='datetime',
plot_width=1000, plot_height=200,
title_text_font_size="10pt",
tools="pan,wheel_zoom,box_select,reset"
)
p.circle(
'date', ticker,
size=2,
source=source,
nonselection_alpha=0.02
)
return p
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
tickers = listdir(data_dir)
except OSError as e:
print('Stock data not available, see README for download instructions.')
raise e
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
# cache stock data as dict of pandas DataFrames
pd_cache = {}
def get_ticker_data(ticker):
fname = join(data_dir, "table_%s.csv" % ticker.lower())
data = pd.read_csv(
fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date']
)
data = data.set_index('date')
data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
return data
def get_data(ticker1, ticker2):
if pd_cache.get((ticker1, ticker2)) is not None:
return pd_cache.get((ticker1, ticker2))
# only append columns if it is the same ticker
if ticker1 != ticker2:
data1 = get_ticker_data(ticker1)
data2 = get_ticker_data(ticker2)
data = pd.concat([data1, data2], axis=1)
else:
data = get_ticker_data(ticker1)
data = data.dropna()
pd_cache[(ticker1, ticker2)] = data
return data
# class StockApp(VBox):
# extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
# jsmodel = "VBox"
# # text statistics
# pretext = Instance(PreText)
# # plots
# plot = Instance(Plot)
# line_plot1 = Instance(Plot)
# line_plot2 = Instance(Plot)
# hist1 = Instance(Plot)
# hist2 = Instance(Plot)
# # data source
# source = Instance(ColumnDataSource)
# # layout boxes
# mainrow = Instance(HBox)
# histrow = Instance(HBox)
# statsbox = Instance(VBox)
# # inputs
# ticker1 = String(default="AAPL")
# ticker2 = String(default="GOOG")
# ticker1_select = Instance(Select)
# ticker2_select = Instance(Select)
# input_box = Instance(VBoxForm)
# def __init__(self, *args, **kwargs):
# super(StockApp, self).__init__(*args, **kwargs)
# self._dfs = {}
# @classmethod
# def create(cls):
# """
# This function is called once, and is responsible for
# creating all objects (plots, datasources, etc)
# """
# # create layout widgets
# obj = cls()
# # create input widgets
# obj.make_inputs()
# # outputs
# obj.pretext = PreText(text="", width=500)
# obj.make_source()
# obj.make_plots()
# obj.make_stats()
# # layout
# obj.set_children()
# return obj
# def make_inputs(self):
# self.ticker1_select = Select(
# name='ticker1',
# value='AAPL',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# self.ticker2_select = Select(
# name='ticker2',
# value='GOOG',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# @property
# def selected_df(self):
# pandas_df = self.df
# selected = self.source.selected
# if selected:
# pandas_df = pandas_df.iloc[selected, :]
# return pandas_df
# def make_source(self):
# self.source = ColumnDataSource(data=self.df)
# def line_plot(self, ticker, x_range=None):
# p = figure(
# title=ticker,
# x_range=x_range,
# x_axis_type='datetime',
# plot_width=1000, plot_height=200,
# title_text_font_size="10pt",
# tools="pan,wheel_zoom,box_select,reset"
# )
# p.circle(
# 'date', ticker,
# size=2,
# source=self.source,
# nonselection_alpha=0.02
# )
# return p
# def hist_plot(self, ticker):
# global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
# hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
# width = 0.7 * (bins[1] - bins[0])
# center = (bins[:-1] + bins[1:]) / 2
# start = global_bins.min()
# end = global_bins.max()
# top = hist.max()
# p = figure(
# title="%s hist" % ticker,
# plot_width=500, plot_height=200,
# tools="",
# title_text_font_size="10pt",
# x_range=[start, end],
# y_range=[0, top],
# )
# p.rect(center, hist / 2.0, width, hist)
# return p
# def make_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# p = figure(
# title="%s vs %s" % (ticker1, ticker2),
# plot_width=400, plot_height=400,
# tools="pan,wheel_zoom,box_select,reset",
# title_text_font_size="10pt",
# )
# p.circle(ticker1 + "_returns", ticker2 + "_returns",
# size=2,
# nonselection_alpha=0.02,
# source=self.source
# )
# self.plot = p
# self.line_plot1 = self.line_plot(ticker1)
# self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
# self.hist_plots()
# def hist_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# self.hist1 = self.hist_plot(ticker1)
# self.hist2 = self.hist_plot(ticker2)
# def set_children(self):
# self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
# self.mainrow.children = [self.input_box, self.plot, self.statsbox]
# self.input_box.children = [self.ticker1_select, self.ticker2_select]
# self.histrow.children = [self.hist1, self.hist2]
# self.statsbox.children = [self.pretext]
# def input_change(self, obj, attrname, old, new):
# if obj == self.ticker2_select:
# self.ticker2 = new
# if obj == self.ticker1_select:
# self.ticker1 = new
# self.make_source()
# self.make_plots()
# self.set_children()
# curdoc().add(self)
# def setup_events(self):
# super(StockApp, self).setup_events()
# if self.source:
# self.source.on_change('selected', self, 'selection_change')
# if self.ticker1_select:
# self.ticker1_select.on_change('value', self, 'input_change')
# if self.ticker2_select:
# self.ticker2_select.on_change('value', self, 'input_change')
# def make_stats(self):
# stats = self.selected_df.describe()
# self.pretext.text = str(stats)
# def selection_change(self, obj, attrname, old, new):
# self.make_stats()
# self.hist_plots()
# self.set_children()
# curdoc().add(self)
# @property
# def df(self):
# return get_data(self.ticker1, self.ticker2)
# # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# # will render this StockApp. If you don't want serve this applet from a Bokeh
# # server (for instance if you are embedding in a separate Flask application),
# # then just remove this block of code.
# @bokeh_app.route("/bokeh/stocks/")
# @object_page("stocks")
# def make_object():
# app = StockApp.create()
# return app
| bsd-3-clause |
mayblue9/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
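# e.g. a row of raw counts [13, 0, 3] has support 16 and becomes [0.8125, 0., 0.1875]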
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
sawenzel/AliceO2 | Detectors/FIT/benchmark/process.py | 6 | 12238 | # load modules
import re
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
# use classic plot style
plt.style.use('classic')
# read and save user input filenames
mem_filename = sys.argv[1]
cpu_filename = sys.argv[2]
# save the process id names
process_id_mem = re.findall('mem_evolution_(\\d+)', mem_filename)[0]
process_id_cpu = re.findall('cpu_evolution_(\\d+)', cpu_filename)[0]
# check that the process id names are the same
if not process_id_mem==process_id_cpu:
# throw error if true and exit program
sys.stderr.write("The memory and cpu process filenames do not match...\n")
print("input memory filename: ",mem_filename)
print("inpu cpu filename: ",cpu_filename)
exit(1)
# save the main process id (driver application)
process_id = process_id_mem + '.txt' # as string '<PID>.txt'
# save the same process id (driver application), but as a float
driver = float(process_id_mem)
# load the o2 command given
with open(mem_filename) as f:
title = f.readline()
# extract the command given
title = re.findall('#command line: (\\w.+)', title)[0]
# declare string variables for different runs
simulation = 'o2-sim '
serial = 'o2-sim-serial'
digitization = 'o2-sim-digitizer-workflow'
# print the command for the user
print("\nYour command was: ", title)
# check which type of command was given and set the corresponding logfile name
if title.find(simulation) == 0:
print("You have monitored o2 simulation in parallel.\n")
command=simulation
logfilename = 'o2sim.log'
elif title.find(serial) == 0:
print("You have monitored o2 simulation in serial.\n")
command=serial
logfilename = 'o2sim.log'
elif title.find(digitization) == 0:
command=digitization
print("You have monitored o2 digitization.\n")
logfilename = 'o2digi.log'
else :
print("I do not know this type of simulation.\n")
exit(1)
#################################################
# #
# Extract the PIDs from logfile #
# #
#################################################
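# The regular expressions below expect o2sim.log lines of the form (PIDs shown are illustrative):
#   Spawning particle server on PID 12345; Redirect output to serverlog
#   Spawning sim worker 0 on PID 12346; Redirect output to workerlog0
#   Spawning hit merger on PID 12347; Redirect output to mergerlog
# while o2digi.log is scanned for lines like 'Starting <device> on pid 12348'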
if command==simulation: # True if you typed o2-sim
try:
# open o2sim.log file name
with open(logfilename) as logfile:
# read and save the first 6 lines in o2sim.log
loglines = [next(logfile) for line in range(6)]
# print("*******************************\n")
# print("Driver application PID is: ", driver)
# find the PID for the event generator (o2-sim-primary-..)
eventgenerator_line = re.search('Spawning particle server on PID (.*); Redirect output to serverlog\n',loglines[3])
event_gen = float(eventgenerator_line.group(1))
# print("Eventgenerator PID is: ", event_gen)
# find the PID for sim worker 0 (o2-sim-device-runner)
sim_worker_line = re.search('Spawning sim worker 0 on PID (.*); Redirect output to workerlog0\n',loglines[4])
sim_worker = float(sim_worker_line.group(1))
# print("SimWorker 0 PID is: ", sim_worker)
# find the PID for the hitmerger (o2-sim-hitmerger)
hitmerger_line = re.search('Spawning hit merger on PID (.*); Redirect output to mergerlog\n',loglines[5])
hit_merger = float(hitmerger_line.group(1))
# print("Hitmerger PID is: ", hit_merger, "\n")
# print("*******************************\n")
# find the number of simulation workers
n_workers = int(re.findall('Running with (\\d+)', loglines[1])[0])
# save into a list
pid_names = ['driver','event gen','sim worker 0','hit merger']
pid_vals = [driver,event_gen,sim_worker,hit_merger]
# append pid names for remaining workers
for i in range(n_workers-1):
pid_names.append(f"sim worker {i+1}")
no_log = False
except IOError:
print("There exists no o2sim.log..")
print("No details of devices will be provided.")
no_log = True
elif command==digitization: # True if you typed o2-sim-digitizer-workflow
try:
# open o2digi.log file name
with open(logfilename) as logfile:
# save the first 100 lines in o2digi.log
loglines = [next(logfile) for line in range(100)]
# declare list for PID numbers and names
pid_vals = []
pid_names = []
# loop through lines to find PIDs
for line_num,line in enumerate(loglines):
pid_line = re.findall('Starting (\\w.+) on pid (\\d+)',line)
if pid_line: # True if the line contains 'Start <PID name> on pid <PID number>'
# assign the name and value to variables
pid_name = pid_line[0][0]
pid_val = float(pid_line[0][1])
# save to list
pid_names.append(pid_name)
pid_vals.append(pid_val)
# insert driver application name and value
pid_names.insert(0,'driver')
pid_vals.insert(0,driver)
# for id in range(len(pid_names)):
# print(pid_names[id],"PID is: ",pid_vals[id])
# print(pid_vals[pid])
# print("*******************************\n")
no_log = False
except IOError:
print("There exists no o2digi.log..")
print("No details of devices will be provided.")
no_log = True
elif command==serial:
print("*******************************\n")
print("Driver application PID is: ", driver)
print("There are no other PIDs")
no_log = False
else :
print("Something went wrong.. exiting")
exit(1)
############### End of PID extraction #################
# get time and PID filenames
time_filename = 'time_evolution_' + process_id
pid_filename = 'pid_evolution_' + process_id
# load data as pandas DataFrame (DataFrame due to uneven number of coloumns in file)
mem = pd.read_csv(mem_filename, skiprows=2, sep=" +", engine="python",header=None)
cpu = pd.read_csv(cpu_filename, skiprows=2, sep=" +", engine="python",header=None)
pid = pd.read_csv(pid_filename, skiprows=2, sep=" +", engine="python",header=None)
t = np.loadtxt(time_filename) # time in ms (milliseconds)
# extract values from the DataFrame
mem = mem[1:].values
cpu = cpu[1:].values
pid = pid[1:].values
# process time series
t = t-t[0] # rescale time such that t_start=0
t = t*10**(-3) # convert milliseconds to seconds
# replace 'Nones' (empty) elements w/ zeros and convert string values to floats
mem = np.nan_to_num(mem.astype(float))
cpu = np.nan_to_num(cpu.astype(float))
pid = np.nan_to_num(pid.astype(float))
# find all process identification numbers involved (PIDs), the index of their first
# occurrence (index) in the unraveled array and the total number of appearances (counts) in the process
PIDs, index, counts = np.unique(pid,return_index=True,return_counts=True)
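# e.g. np.unique([[7., 7., 0.], [7., 9., 0.]], return_index=True, return_counts=True)
# gives (array([0., 7., 9.]), array([2, 0, 4]), array([2, 3, 1])); indices refer to the flattened array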
# NOTE: we don't want to count 'fake' PIDs. These are PIDs that spawn only once without taking
# any memory or cpu. Due to their appearance they shift the columns in all monitored files.
# This needs to be taken care of, so they are removed from the data below.
# return the index of the fake pids
fake = np.where(counts==1)
# delete the fake pids from PIDs list
PIDs = np.delete(PIDs,fake)
index = np.delete(index,fake)
counts = np.delete(counts,fake)
# we also delete PID=0, as this is not a real PID
PIDs = np.delete(PIDs,0)
index = np.delete(index,0)
counts = np.delete(counts,0)
# get number of real PIDs
nPIDs = len(PIDs)
# dimension of data
dim = pid.shape # could also use from time series
# NOTE: dimension is always (n_steps, 40)
# because of '#' characters in ./monitor.sh
# number of steps in simulation for o2-sim
steps = len(pid[:,0]) # could also use from time series
# declare final lists
m = [] # memory
c = [] # cpu
p = [] # process
for i in range(nPIDs): # loop through all valid PIDs
# find the number of zeros to pad with
init_zeros, _ = np.unravel_index(index[i],dim)
# pad the 'initial' zeros (beginning)
mem_dummy = np.hstack((np.zeros(init_zeros),mem[pid==PIDs[i]]))
cpu_dummy = np.hstack((np.zeros(init_zeros),cpu[pid==PIDs[i]]))
pid_dummy = np.hstack((np.zeros(init_zeros),pid[pid==PIDs[i]]))
# find the difference in final steps
n_diff = steps - len(mem_dummy)
# pad the ending w/ zeros
mem_dummy = np.hstack((mem_dummy,np.zeros(n_diff)))
cpu_dummy = np.hstack((cpu_dummy,np.zeros(n_diff)))
pid_dummy = np.hstack((pid_dummy,np.zeros(n_diff)))
# save to list
m.append(mem_dummy)
c.append(cpu_dummy)
p.append(pid_dummy)
#print("PID is: ",PIDs[i])
#print("initial number of zeros to pad: ", init_zeros)
#print("final number of zeros to pad: ", n_diff)
#print("**************\n")
# convert to array and assure correct shape of arrays
m = np.asarray(m).T
c = np.asarray(c).T
p = np.asarray(p).T
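# m, c and p now have shape (steps, nPIDs): one column per surviving PID,
# zero-padded before its first and after its last sample so that every
# column is aligned with the common time axis t.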
###################################
# #
# COMPUTATIONS #
# #
###################################
print("********************************")
# compute average memory and maximum memory
M = np.sum(m,axis=1) # sum all processes memory
max_mem = np.max(M) # find maximum
mean_mem = np.mean(M) # find mean
print(f"max mem: {max_mem:.2f} MB")
print(f"mean mem: {mean_mem:.2f} MB")
C = np.sum(c,axis=1) # compute total cpu
max_cpu = np.max(C)
print(f"max cpu: {max_cpu:.2f}s")
# print total wall clock time
wall_clock = t[-1]
print(f"Total wall clock time: {wall_clock:.2f} s")
# print ratio
ratio = np.max(C)/t[-1]
print(f"Ratio (cpu time) / (wall clock time) : {ratio:.2f}")
print("********************************")
###################################
# #
# PLOTTING #
# #
###################################
if no_log: # True if user hasn't provided logfiles
# plot of total, max and mean memory
fig,ax = plt.subplots(dpi=125,facecolor="white")
ax.plot(t,M,'-k',label='total memory');
ax.hlines(np.mean(M),np.min(t),np.max(t),color='blue',linestyles='--',label='mean memory');
ax.hlines(np.max(M),np.min(t),np.max(t),color='red',linestyles='--',label='max memory');
ax.set_title(title)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Memory [MB]")
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.legend(prop={'size': 10},loc='best')
ax.grid();
# plot of total, max and mean CPU
fig1,ax1 = plt.subplots(dpi=125,facecolor="white")
ax1.plot(t,C,'-k',label='total cpu');
ax1.hlines(np.mean(C),np.min(t),np.max(t),color='blue',linestyles='--',label='mean cpu');
ax1.hlines(np.max(C),np.min(t),np.max(t),color='red',linestyles='--',label='max cpu');
ax1.set_title(title)
ax1.set_xlabel("Time [s]")
ax1.set_ylabel("CPU [s]")
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.legend(prop={'size': 10},loc='best');
ax1.grid()
plt.show();
else : # details about the PID exists (from logfiles)
# # convert to pid info lists to arrays
# pid_vals = np.asarray(pid_vals)
# pid_names = np.asarray(pid_names)
#
# # be sure of the correct ordering of pids
# pid_placement = np.where(pid_vals==PIDs)
# plot memory
fig,ax = plt.subplots(dpi=125,facecolor="white")
ax.plot(t,m);
# some features for the plot
ax.set_title(title)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Memory [MB]")
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.legend(pid_names,prop={'size': 10},loc='best')
ax.grid();
# plot cpu
fig1,ax1 = plt.subplots(dpi=125,facecolor="white")
ax1.plot(t,c);
# some features for the plot
ax1.set_title(title)
ax1.set_xlabel("Time [s]")
ax1.set_ylabel("CPU [s]")
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.legend(pid_names,prop={'size': 10},loc='best');
ax1.grid()
plt.show();
| gpl-3.0 |
roofit-dev/parallel-roofit-scripts | tensorflow_testing/tensorflow_roofit_demo_2-stripped.py | 1 | 11328 | # -*- coding: utf-8 -*-
# @Author: patrick
# @Date: 2016-09-01 17:04:53
# @Last Modified by: Patrick Bos
# @Last Modified time: 2016-10-06 07:30:16
# as per tensorflow styleguide
# https://www.tensorflow.org/versions/r0.11/how_tos/style_guide.html
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import time
def apply_constraint(var, constraints):
var_name = var.name[:var.name.find(':')]
# low = tf.constant(constraints[var_name][0], dtype=tf.float64)
# high = tf.constant(constraints[var_name][1], dtype=tf.float64)
low = constraints[var_name][0]
high = constraints[var_name][1]
return tf.assign(var, tf.clip_by_value(var, low, high),
name="assign_to_" + var_name)
# return tf.Variable(tf.clip_by_value(var, low, high), name=var_name + '_clipped')
project_dn = "/home/patrick/projects/apcocsm/"
# project_dn = "/home/pbos/apcocsm/"
m0_num = 5.291
argpar_num = -20.0
constraint = {}
constraint['sigmean'] = (5.20, 5.30)
constraint['sigwidth'] = (0.001, 1.)
constraint['argpar'] = (-100., -1.)
constraint['nsig'] = (0., 10000)
constraint['nbkg'] = (0., 10000)
constraint['mes'] = (5.20, 5.30)
pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")
def gaussian_pdf(x, mean, std):
val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two), (sqrt2pi * std),
name="gaussian_pdf")
return val
def argus_pdf(m, m0, c, p=0.5):
t = m / m0
u = 1 - t * t
return tf.select(tf.greater_equal(t, one),
tf.zeros_like(m),
m * tf.pow(u, p) * tf.exp(c * u),
name="argus_pdf")
# return tf.cond(tf.greater_equal(t, one),
# lambda: zero,
# lambda: m * tf.pow(u, p) * tf.exp(c * u),
# name="argus_pdf")
# N.B.: with cond the arguments must be functions (taking no arguments)
# so that tf only has to call / evaluate them once they are needed.
# That is not possible with select, which immediately receives both
# full tensors.
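# For p = 1/2 the helpers below use the closed-form antiderivative in the
# variable u = 1 - (m/m0)^2:
#   F(u) = -m0^2/2 * ( exp(c*u)*sqrt(u)/c + sqrt(pi)/(2*(-c)**1.5) * erf(sqrt(-c*u)) )
# evaluated at the integration bounds (clipped at m0), once with TensorFlow ops
# and once with numpy/scipy.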
def argus_integral_phalf(m_low, m_high, m0, c):
"""
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
def F(x):
return -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c + 0.5 / tf.pow(-c, 1.5) * tf.sqrt(pi) * tf.erf(tf.sqrt(-c * x)))
a = tf.minimum(m_low, m0)
b = tf.minimum(m_high, m0)
x1 = 1 - tf.pow(a / m0, 2)
x2 = 1 - tf.pow(b / m0, 2)
area = tf.sub(F(x2), F(x1), name="argus_integral_phalf")
return area
def argus_integral_phalf_numpy(m_low, m_high, m0, c):
"""
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
import scipy.special
def F(x):
return -0.5 * m0 * m0 * (np.exp(c * x) * np.sqrt(x) / c + 0.5 / (-c)**1.5 * np.sqrt(np.pi) * scipy.special.erf(np.sqrt(-c * x)))
a = np.min([m_low, m0])
b = np.min([m_high, m0])
x1 = 1 - (a / m0)**2
x2 = 1 - (b / m0)**2
area = F(x2) - F(x1)
return area
argus_numerical_norm = tf.constant(argus_integral_phalf_numpy(constraint['mes'][0],
constraint['mes'][1],
m0_num, argpar_num),
dtype=tf.float64, name="argus_numerical_norm")
def argus_pdf_phalf_WN(m, m0, c, m_low, m_high, tf_norm=tf.constant(False)):
"""
WN: with normalization
tf_norm: use the tensorflow integral function (True) or the numpy one (False)
"""
norm = tf.cond(tf_norm,
lambda: argus_integral_phalf(m_low, m_high, m0, c),
lambda: argus_numerical_norm, name="argus_norm")
# norm = argus_numerical_norm
# norm = argus_integral_phalf(m_low, m_high, m0, c)
return argus_pdf(m, m0, c) / norm
# // --- Observable ---
# RooRealVar mes("mes","m_{ES} (GeV)",5.20,5.30) ;
# // --- Build Gaussian signal PDF ---
# RooRealVar sigmean("sigmean","B^{#pm} mass",5.28,5.20,5.30) ;
# RooRealVar sigwidth("sigwidth","B^{#pm} width",0.0027,0.001,1.) ;
sigmean = tf.Variable(np.float64(5.28), name="sigmean")
sigwidth = tf.Variable(np.float64(0.0027), name="sigwidth")
# RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ;
# // --- Build Argus background PDF ---
# RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ;
# RooConstVar m0("m0", "resonant mass", 5.291);
argpar = tf.Variable(np.float64(argpar_num), name="argpar")
m0 = tf.constant(np.float64(m0_num), name="m0")
# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(np.float64(200), name="nsig")
nbkg = tf.Variable(np.float64(800), name="nbkg")
# RooAddPdf sum("sum","g+a",RooArgList(gauss,argus),RooArgList(nsig,nbkg)) ;
# // --- Generate a toyMC sample from composite PDF ---
# RooDataSet *data = sum.generate(mes,2000) ;
def sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high):
add = tf.add(nsig * gaussian_pdf(mes, sigmean, sigwidth),
nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high),
name="sum_pdf")
return tf.div(add, nsig + nbkg, name="sum_pdf_normalized")
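# sum_pdf is the normalized two-component mixture
# (nsig * Gaussian + nbkg * Argus) / (nsig + nbkg), the TensorFlow counterpart
# of the RooAddPdf built in the RooFit snippet quoted in the comments above.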
# generate the data in RooFit and import it here
# run this in ROOT:
# data.write("roofit_demo_random_data_values.dat");
data_raw = np.loadtxt(project_dn + "roofit_demo_random_data_values.dat",
dtype=np.float64)
data = tf.constant(data_raw, name='event_data')
# // --- Perform extended ML fit of composite PDF to toy data ---
# sum.fitTo(*data,"Extended") ;
# convert to tf constants, otherwise you'll get complaints about float32s...
for key in constraint.keys():
low = constraint[key][0]
high = constraint[key][1]
constraint[key] = (tf.constant(low, dtype=tf.float64),
tf.constant(high, dtype=tf.float64))
# nll = tf.neg(tf.reduce_sum(tf.log(tf.map_fn(lambda mes: sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, constraint['mes'][0], constraint['mes'][1]), data))), name="nll")
print("N.B.: using direct data entry")
nll = tf.neg(tf.reduce_sum(tf.log(sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar, constraint['mes'][0], constraint['mes'][1]))), name="nll")
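# nll is the negative log-likelihood, -sum_i log(sum_pdf(mes_i)), evaluated over
# the whole event tensor in a single graph op; it is the quantity minimized below
# with the Adam optimizer.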
# print("N.B.: using unsummed version of nll! This appears to be the way people minimize cost functions in tf...")
# nll = tf.neg(tf.log(sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar, constraint['mes'][0], constraint['mes'][1])), name="nll")
# grad = tf.gradients(nll, [mu, sigma])
max_steps = 1000
status_every = 100
# sigmean_c = apply_constraint(sigmean, constraint)
# sigwidth_c = apply_constraint(sigwidth, constraint)
# argpar_c = apply_constraint(argpar, constraint)
# nsig_c = apply_constraint(nsig, constraint)
# nbkg_c = apply_constraint(nbkg, constraint)
# update_vars = [sigmean_c, sigwidth_c, argpar_c, nsig_c, nbkg_c]
variables = tf.all_variables()
# Create an optimizer with the desired parameters.
# opt = tf.train.GradientDescentOptimizer(learning_rate=0.001)
# opt = tf.train.AdagradOptimizer(learning_rate=0.1)
opt = tf.train.AdamOptimizer()
opt_op = opt.minimize(nll)
tf.scalar_summary('nll', nll)
init_op = tf.initialize_all_variables()
# check_op = tf.report_uninitialized_variables()
# start session
with tf.Session() as sess:
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
summarize_merged = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter('./train_%i' % int(time.time()), sess.graph)
# Run the init operation.
sess.run(init_op)
# print sess.run(init_op)
# print sess.run(check_op)
true_vars = {}
for v in variables:
key = v.name[:v.name.find(':')]
true_vars[key] = v.eval()
true_vars['m0'] = m0.eval()
print("name\t" + "\t".join([v.name.ljust(10) for v in variables]) + "\t | nll\t\t\t | step")
print("init\t" + "\t".join(["%6.4e" % v for v in sess.run(variables)]) + "\t | %f" % sess.run(nll))
print()
start = timer()
for step in xrange(max_steps):
# print "variables 3:", sess.run(variables)
summary, _ = sess.run([summarize_merged, opt_op])
# sess.run([opt_op])
summary_writer.add_summary(summary, step)
if step % status_every == 0:
var_values_opt = sess.run(variables)
nll_value_opt = sess.run(nll)
# sess.run(update_vars)
# var_values_clip = np.array(sess.run(variables))
# nll_value_clip = np.array(sess.run(nll))
print("opt\t" + "\t".join(["%6.4e" % v for v in var_values_opt]) + "\t | %f\t | %i" % (nll_value_opt, step))
# clipped = np.where(var_values_opt == var_values_clip, [" "*10] * len(variables), ["%6.4e" % v for v in var_values_clip])
# print "clip\t" + "\t".join(clipped) + "\t | %f" % nll_value_clip
end = timer()
print("Loop took %f seconds" % (end - start))
raise Exception
fit_vars = {}
for v in variables:
key = v.name[:v.name.find(':')]
fit_vars[key] = v.eval()
fit_vars['m0'] = m0.eval()
counts, bins = np.histogram(data.eval(), bins=100)
x_bins = (bins[:-1] + bins[1:]) / 2
y_fit = [sum_pdf(x, mes_low=constraint['mes'][0], mes_high=constraint['mes'][1], **fit_vars).eval() for x in x_bins]
argus_fit = [fit_vars['nbkg'] * argus_pdf_phalf_WN(x, fit_vars['m0'], fit_vars['argpar'], m_low=constraint['mes'][0], m_high=constraint['mes'][1]).eval() for x in x_bins]
y_true = [sum_pdf(x, mes_low=constraint['mes'][0], mes_high=constraint['mes'][1], **true_vars).eval() for x in x_bins]
# normalize fit values to data counts
y_fit_norm = np.sum(counts) / np.sum(y_fit)
y_fit = [y * y_fit_norm for y in y_fit]
# argus_fit_norm = np.sum(counts) / np.sum(argus_fit)
argus_fit = [a * y_fit_norm for a in argus_fit]
y_true_norm = np.sum(counts) / np.sum(y_true)
y_true = [y * y_true_norm for y in y_true]
plt.errorbar(x_bins, counts, yerr=np.sqrt(counts), fmt='.g')
plt.plot(x_bins, y_fit, '-b')
plt.plot(x_bins, argus_fit, '--b')
plt.plot(x_bins, y_true, ':k')
plt.show()
# tf.InteractiveSession()
# sess = tf.Session()
# sess.run(init_op)
# opt = tf.train.GradientDescentOptimizer(learning_rate=1)
# opt_op = opt.minimize(nll, var_list=[sigmean, sigwidth, argpar, nsig, nbkg])
# for step in xrange(10):
# out = sess.run([opt_op, nll, sigmean, sigwidth, argpar, nsig, nbkg])
# print out[1:]
# sess.close()
# // --- Plot toy data and composite PDF overlaid ---
# RooPlot* mesframe = mes.frame() ;
# data->plotOn(mesframe) ;
# sum.plotOn(mesframe) ;
# sum.plotOn(mesframe,Components(argus),LineStyle(kDashed)) ;
# mesframe->Draw();
| apache-2.0 |
beiko-lab/gengis | bin/Lib/site-packages/mpl_toolkits/axes_grid/colorbar.py | 4 | 27721 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.collections as collections
import matplotlib.contour as contour
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Bbox
make_axes_kw_doc = '''
============= ====================================================
Property Description
============= ====================================================
*orientation* vertical or horizontal
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
============= ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g., '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
It is known that some vector graphics viewers (svg and pdf) render white gaps
between segments of the colorbar. This is due to bugs in the viewers, not
matplotlib. As a workaround the colorbar can be rendered with overlapping
segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However, this has negative consequences in other circumstances, particularly with
semi-transparent images (alpha < 1) and colorbar extensions, and is not enabled
by default (see issue #1188).
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
The transData of the *cax* is adjusted so that the limits in the
longest axis actually correspond to the limits in colorbar range. On
the other hand, the shortest axis has data limits of [1, 2], whose
unconventional value is to prevent underflow when log scale is used.
''' % (make_axes_kw_doc, colormap_kw_doc)
docstring.interpd.update(colorbar_doc=colorbar_doc)
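# Example usage of this module (a minimal sketch; `ax` and `data` are assumed
# to be an existing Axes and a 2D array; the `colorbar` function at the bottom
# of this module wraps these steps):
#
#     im = ax.imshow(data)
#     cax, kw = make_axes(ax, orientation='vertical', shrink=0.8)
#     cb = Colorbar(cax, im, **kw)
#     cb.set_label_text('intensity')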
class CbarAxesLocator(object):
"""
CbarAxesLocator is an axes_locator for colorbar axes. It adjusts the
position of the axes to make room for the extended ends, i.e., the
extended ends are located outside the axes area.
"""
def __init__(self, locator=None, extend="neither", orientation="vertical"):
"""
*locator* : the bbox returned from the locator is used as the
initial axes location. If None, axes.bbox is used.
*extend* : same as in ColorbarBase
*orientation* : same as in ColorbarBase
"""
self._locator = locator
self.extesion_fraction = 0.05
self.extend = extend
self.orientation = orientation
def get_original_position(self, axes, renderer):
"""
get the original position of the axes.
"""
if self._locator is None:
bbox = axes.get_position(original=True)
else:
bbox = self._locator(axes, renderer)
return bbox
def get_end_vertices(self):
"""
return a tuple of two sets of vertices for the colorbar extended ends.
The first set is for the min end, and the second is for
the max end.
"""
# Note that the two sets of vertices are concatenated to make the
# vertices for the frame patch.
extesion_fraction = self.extesion_fraction
corx = extesion_fraction*2.
cory = 1./(1. - corx)
x1, y1, w, h = 0, 0, 1, 1
x2, y2 = x1 + w, y1 + h
dw, dh = w*extesion_fraction, h*extesion_fraction*cory
if self.extend in ["min", "both"]:
bottom = [(x1, y1),
(x1+w/2., y1-dh),
(x2, y1)]
else:
bottom = [(x1, y1),
(x2, y1)]
if self.extend in ["max", "both"]:
top = [(x2, y2),
(x1+w/2., y2+dh),
(x1, y2)]
else:
top = [(x2, y2),
(x1, y2)]
if self.orientation == "horizontal":
bottom = [(y,x) for (x,y) in bottom]
top = [(y,x) for (x,y) in top]
return bottom, top
def get_path_patch(self):
"""
get the path for axes patch
"""
end1, end2 = self.get_end_vertices()
verts = [] + end1 + end2 + end1[:1]
return Path(verts)
def get_path_ends(self):
"""
get the paths for extended ends
"""
end1, end2 = self.get_end_vertices()
return Path(end1), Path(end2)
def __call__(self, axes, renderer):
"""
Return the adjusted position of the axes
"""
bbox0 = self.get_original_position(axes, renderer)
bbox = bbox0
x1, y1, w, h = bbox.bounds
extesion_fraction = self.extesion_fraction
dw, dh = w*extesion_fraction, h*extesion_fraction
if self.extend in ["min", "both"]:
if self.orientation == "horizontal":
x1 = x1 + dw
else:
y1 = y1+dh
if self.extend in ["max", "both"]:
if self.orientation == "horizontal":
w = w-2*dw
else:
h = h-2*dh
return Bbox.from_bounds(x1, y1, w, h)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
# artists
self.solids = None
self.lines = None
self.dividers = None
self.extension_patch1 = None
self.extension_patch2 = None
if orientation == "vertical":
self.cbar_axis = self.ax.yaxis
else:
self.cbar_axis = self.ax.xaxis
if format is None:
if isinstance(self.norm, colors.LogNorm):
# change both axis for proper aspect
self.ax.xaxis.set_scale("log")
self.ax.yaxis.set_scale("log")
self.ax._update_transScale()
self.cbar_axis.set_minor_locator(ticker.NullLocator())
formatter = ticker.LogFormatter()
else:
formatter = None
elif cbook.is_string_like(format):
formatter = ticker.FormatStrFormatter(format)
else:
formatter = format # Assume it is a Formatter
if formatter is None:
formatter = self.cbar_axis.get_major_formatter()
else:
self.cbar_axis.set_major_formatter(formatter)
if cbook.iterable(ticks):
self.cbar_axis.set_ticks(ticks)
elif ticks is not None:
self.cbar_axis.set_major_locator(ticks)
else:
self._select_locator(formatter)
self._config_axes()
self.update_artists()
self.set_label_text('')
def _get_colorbar_limits(self):
"""
initial limits for the colorbar range. The returned min and max values
will be used to create the colorbar solids, extensions, etc.
"""
if self.boundaries is not None:
C = self.boundaries
if self.extend in ["min", "both"]:
C = C[1:]
if self.extend in ["max", "both"]:
C = C[:-1]
return min(C), max(C)
else:
return self.get_clim()
def _config_axes(self):
'''
Adjust the properties of the axes to be adequate for colorbar display.
'''
ax = self.ax
axes_locator = CbarAxesLocator(ax.get_axes_locator(),
extend=self.extend,
orientation=self.orientation)
ax.set_axes_locator(axes_locator)
# override get_data_ratio so that the aspect setting works.
def _f():
return 1.
ax.get_data_ratio = _f
ax.get_data_ratio_log = _f
ax.set_frame_on(True)
ax.set_navigate(False)
self.ax.set_autoscalex_on(False)
self.ax.set_autoscaley_on(False)
if self.orientation == 'horizontal':
ax.xaxis.set_label_position('bottom')
ax.set_yticks([])
else:
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
def update_artists(self):
"""
Update the colorbar's associated artists, *filled* and
*ends*. Note that *lines* are not updated. This needs to be
called whenever the clim of the associated image changes.
"""
self._process_values()
self._add_ends()
X, Y = self._mesh()
if self.filled:
C = self._values[:,np.newaxis]
self._add_solids(X, Y, C)
ax = self.ax
vmin, vmax = self._get_colorbar_limits()
if self.orientation == 'horizontal':
ax.set_ylim(1, 2)
ax.set_xlim(vmin, vmax)
else:
ax.set_xlim(1, 2)
ax.set_ylim(vmin, vmax)
def _add_ends(self):
"""
Create patches from extended ends and add them to the axes.
"""
del self.extension_patch1
del self.extension_patch2
path1, path2 = self.ax.get_axes_locator().get_path_ends()
fc=mpl.rcParams['axes.facecolor']
ec=mpl.rcParams['axes.edgecolor']
linewidths=0.5*mpl.rcParams['axes.linewidth']
self.extension_patch1 = PathPatch(path1,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.extension_patch2 = PathPatch(path2,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.ax.add_artist(self.extension_patch1)
self.ax.add_artist(self.extension_patch2)
def _set_label_text(self):
"""
set label.
"""
self.cbar_axis.set_label_text(self._label, **self._labelkw)
def set_label_text(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label_text()
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.extend in ["min", "both"]:
cc = self.to_rgba([C[0][0]])
self.extension_patch1.set_fc(cc[0])
X, Y, C = X[1:], Y[1:], C[1:]
if self.extend in ["max", "both"]:
cc = self.to_rgba([C[-1][0]])
self.extension_patch2.set_fc(cc[0])
X, Y, C = X[:-1], Y[:-1], C[:-1]
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha,
}
del self.solids
del self.dividers
col = self.ax.pcolor(*args, **kw)
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],),
)
self.ax.add_collection(self.dividers)
else:
self.dividers = None
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar. It deletes preexisting lines.
'''
del self.lines
N = len(levels)
x = np.array([1.0, 2.0])
X, Y = np.meshgrid(x,levels)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths,
)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _select_locator(self, formatter):
'''
select a suitable locator
'''
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator(nbins=5)
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b) #, nbins=10)
self.cbar_axis.set_major_locator(locator)
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v = np.arange(self.cmap.N, dtype=np.int16)
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = np.array(self.norm.boundaries)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v = 0.5*(bi[:-1] + bi[1:])
self._boundaries = b
self._values = v
return
else:
b = self._uniform_y(self.cmap.N+1)
self._process_values(b)
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries.
'''
vmin, vmax = self._get_colorbar_limits()
if isinstance(self.norm, colors.LogNorm):
y = np.logspace(np.log10(vmin), np.log10(vmax), N)
else:
y = np.linspace(vmin, vmax, N)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([1.0, 2.0])
if self.spacing == 'uniform':
y = self._uniform_y(len(self._boundaries))
else:
y = self._boundaries
self._y = y
X, Y = np.meshgrid(x,y)
return X, Y
def set_alpha(self, alpha):
"""
set alpha value.
"""
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
"""
Update the colorbar artists to reflect the change of the
associated mappable.
"""
self.update_artists()
if isinstance(mappable, contour.ContourSet):
if not mappable.filled:
self.add_lines(mappable)
@docstring.Substitution(make_axes_kw_doc)
def make_axes(parent, **kw):
'''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
'''
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
def colorbar(mappable, cax=None, ax=None, **kw):
"""
Create a colorbar for a ScalarMappable instance.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if cax is None:
cax, kw = make_axes(ax, **kw)
cax.hold(True)
cb = Colorbar(cax, mappable, **kw)
def on_changed(m):
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
ax.figure.sca(ax)
return cb
| gpl-3.0 |
DiamondLightSource/auto_tomo_calibration-experimental | measure_resolution/lmfit-py/lmfit/ui/__init__.py | 7 | 1032 | # These variables are used at the end of the module to decide
# which BaseFitter subclass the Fitter will point to.
import warnings
has_ipython, has_matplotlib = False, False
try:
import matplotlib
except ImportError:
pass
else:
has_matplotlib = True
try:
import IPython
except ImportError:
pass
else:
_ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
_ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
try:
if IPython.release.version_info[0] < 2:
warnings.warn(_ipy_msg1)
else:
# has_ipython = iPython installed and we are in an IPython session.
has_ipython = IPython.get_ipython() is not None
except Exception as e:
warnings.warn(_ipy_msg2)
from .basefitter import BaseFitter
Fitter = BaseFitter
if has_matplotlib:
from .basefitter import MPLFitter
Fitter = MPLFitter
if has_ipython:
from .ipy_fitter import NotebookFitter
Fitter = NotebookFitter
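# At import time, Fitter resolves to the most capable backend available:
# NotebookFitter (IPython >= 2 inside an IPython session) > MPLFitter
# (matplotlib installed) > BaseFitter (plain fallback).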
| apache-2.0 |
klocey/DiversityTools | StatPak/StatPak.py | 1 | 3852 | # -*- coding: utf-8 -*-
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
import random
import scipy as sc
import os
import sys
import pandas
from pandas.tools import plotting
from scipy import stats
import statsmodels
from statsmodels.formula.api import ols
from numpy.random import randn
""" http://statsmodels.sourceforge.net/devel/stats.html#residual-diagnostics-and-specification-tests """
def NormStats(resids):
DW = statsmodels.stats.stattools.durbin_watson(resids, axis=0) # Durbin-Watson statistic for autocorrelation in the residuals
JB = statsmodels.stats.stattools.jarque_bera(resids, axis=0) # Jarque-Bera test for normality
Omni = statsmodels.stats.stattools.omni_normtest(resids, axis=0) # Omnibus test for normal skewness and kurtosis
NormAd = statsmodels.stats.diagnostic.normal_ad(resids, axis=0) # Anderson-Darling test for a normal distribution with unknown mean and variance
KSnorm = statsmodels.stats.diagnostic.kstest_normal(resids, pvalmethod='approx') # Kolmogorov-Smirnov test with estimated mean and variance
Lfor = statsmodels.stats.diagnostic.lillifors(resids, pvalmethod='approx') # Lilliefors test for normality
return DW, JB, Omni, NormAd, KSnorm, Lfor
def AutoCorrStats(x, results, lags=None, nlags=None, store=False, boxpierc=False):
Lj = statsmodels.stats.diagnostic.acorr_ljungbox(x, lags=None, boxpierce=False) # Calculate the Ljung-Box test for no autocorrelation
BG = statsmodels.stats.diagnostic.acorr_breush_godfrey(results, nlags=None, store=False) # Calculate the Breusch-Godfrey Lagrange Multiplier test for residual autocorrelation
return Lj, BG
def ResidStat(resid, exog_het):
HB = statsmodels.stats.diagnostic.het_breushpagan(resid, exog_het) # Breusch-Pagan test of the hypothesis that the residual variance does not depend on the variables in x
return HB
def HetScedStats(resid, exog):
HW = statsmodels.stats.diagnostic.het_white(resid, exog, retres=False) # White’s Lagrange Multiplier Test for Heteroscedasticity
HA = statsmodels.stats.diagnostic.het_arch(resid, maxlag=None, autolag=None, store=False, regresults=False, ddof=0) # Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH)
return HW, HA
def Linearity(res, resid, exog, olsresidual, olsresults):
LH = statsmodels.stats.diagnostic.linear_harvey_collier(res) # Harvey Collier test for linearity. The Null hypothesis is that the regression is correctly modeled as linear.
LR = statsmodels.stats.diagnostic.linear_rainbow(res, frac=0.5) # Rainbow test for linearity, The Null hypothesis is that the regression is correctly modelled as linear. The alternative for which the power might be large are convex, check.
Llm = statsmodels.stats.diagnostic.linear_lm(resid, exog, func=None) # Lagrange multiplier test for linearity against functional alternative
Bcusum = statsmodels.stats.diagnostic.breaks_cusumolsresid(olsresidual, ddof=0) # cusum test for parameter stability based on ols residuals
BH = statsmodels.stats.diagnostic.breaks_hansen(olsresults) # test for model stability, breaks in parameters for ols, Hansen 1992
Rols = statsmodels.stats.diagnostic.recursive_olsresiduals(olsresults, skip=None, lamda=0.0, alpha=0.95) # calculate recursive ols with residuals and cusum test statistic
return LH, LR, Llm, Bcusum, BH, Rols
def Outliers(results):
#class statsmodels.stats.outliers_influence.OLSInfluence(results)
OutInf = statsmodels.stats.outliers_influence.OLSInfluence(results)
return
""" Incomplete
# HGQ = statsmodels.stats.diagnostic.HetGoldfeldQuandt # function is not complete in statsmodels documentation
# ComCox = class statsmodels.stats.diagnostic.CompareCox # Cox Test for non-nested models
"""
| mit |
jahanzebk/python-text-classifier | main.py | 1 | 4530 | import sys
import FileHandler as FH, Classifier, EntityExtractor as EE
from TextProcessor import TextProcessor as TP
import Document as Doc
from nltk import classify
from sklearn.externals import joblib
from sklearn.naive_bayes import MultinomialNB
def main():
# Initialize Directory paths and variables
docsDir = "docs/"
logFile = "log.txt"
try:
userId = sys.argv[0] # send via PHP exec()
userDir = docsDir + userId + "/" # docs/userId/
except IndexError:
msg = "No User Id given."
myFH = FH.FileHandler()
myFH.makeLog(logFile, msg)
sys.exit(msg)
try:
currSetId = sys.argv[0] # send via PHP exec()
currSetDir = userDir + currSetId + "/" # docs/userId/setname/
except IndexError:
msg = "No Set Id given."
myFH = FH.FileHandler()
myFH.makeLog(logFile, msg)
sys.exit(msg)
#open whole set in another function
fullAnalysis(currSetDir);
def fullAnalysisOLD(currSetDir):
dir = "docs/testing/" # actual dir will be build from args (docs/userID/setID dir)
nbClassifier = Classifier.Classifier()
myFH = FH.FileHandler()
#nbClassifier.NLTK_NB_sentiTrain()
#Load test documents
testDocs = myFH.loadDirs(dir, True);
[doc.nbPrepare() for doc in testDocs]
testFeatureSet = [(doc.features, doc.category) for doc in testDocs]
#Classify documents
#classifier = nbClassifier.nbLoadTrainer()
classifier = nbClassifier.nbTrain('docs/training/')
#print classifier.classify(testFeatureSet)
print "Accuracy: "
print classify.accuracy(classifier, testFeatureSet)
classifier.show_most_informative_features(10)
def fullAnalysis(currSetDir):
test_SK_NB_classification()
def test_SK_NB_classification():
dir = "docs/testing/" # actual dir will be build from args (docs/userID/setID dir)
nbClassifier = Classifier.Classifier()
myFH = FH.FileHandler()
#Load test documents
testDocs = myFH.loadDirs(dir, True);
docCats = [doc.category for doc in testDocs]
[tfidfVec, tfidfs] = TP.SK_NB_calcTFIDFs(testDocs)
uniqueCats = nbClassifier.uniqify(docCats)
print "Loading Classifier."
clf = joblib.load("pickles/SK_NB/SK_classifier.pkl")
# nbClassifier.printImpWords(tfidfVec, clf, None, clf.classes_, 50)
# nbClassifier.showMistakes(clf, testDocs, tfidfs, clf.classes_)
# print "Accuracy: "
# print clf.score(tfidfs, docCats)
nbClassifier.SK_NB_accuracy(clf, tfidfVec, None, True, True, uniqueCats)
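# A minimal sketch of the scikit-learn pipeline approach mentioned in the TODO
# list below (hypothetical attribute names: assumes each Document exposes its
# raw text as `doc.text` and its label as `doc.category`):
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     pipe = Pipeline([("tfidf", TfidfVectorizer()), ("nb", MultinomialNB())])
#     pipe.fit([d.text for d in trainDocs], [d.category for d in trainDocs])
#     joblib.dump(pipe, "pickles/SK_NB/SK_pipeline.pkl")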
# todo list:
# ================== DONE ================= organize pickles
# find precision, recall and f-score in the accuracy functions and maybe try to use them to make better decisions
# decide how to move on based on fscore results (imbalanced) and wikipedia db
# cleanup project and make sub folders
# use pipeline instead
# look into the hashingVector big data problem
# figure out with partial_fit
# make passive aggressive, test it
# then fix up NLTK classifiers, make sure they work with joblib,
# look into a hierarchical classification with partial_fit()
# give accuracy on a test set, keep separate test sets e.g. first10daysofJuly
# make a proper training data thing
# a classify_unlabelled function that will run for the actual app
# use nltk features to test and increase accuracy e.g. stemming, collocations
# make a function that checks accuracy by setting a document's class to its most probable 2/3
# look into a hierarchical classification with partial_fit()
# be able to search which category a word probably belongs to
# then fit in bhai's sentiment analysis
# make entity extraction after all these
#
# ================== DONE ================= make trainDocs in classifier not an option, just class_labels
# ================== DONE ================= also, optimize training function to write classifier pkl file, remove it from RAM, then make tfidf pkl
# ================== DONE ================= testing function before writing both pickle files that returns accuracy and confirms if you want to write it
# ================== DONE ================= also check most useful features of the classifier and check how useful game of thrones is as a feature
# ================== DONE ================= also, crawl for seperate test data and document code
if (__name__ == "__main__"):
main() | mit |
theodoregoetz/histogram | histogram/histogram.py | 1 | 50610 | from __future__ import division, unicode_literals
from builtins import str
from six import string_types, text_type
import itertools as it
from collections import Iterable
from copy import copy, deepcopy
from numbers import Integral
from warnings import warn
import numpy as np
from scipy import optimize as opt
from scipy import stats, ndimage, interpolate
from uncertainties import nominal_value, std_dev, ufloat
from uncertainties import unumpy as unp
from .histogram_axis import HistogramAxis
from .detail import skippable, window
from . import rc
# ignore divide by zero (silently create nan's)
np.seterr(divide='ignore', invalid='ignore')
class Histogram(object):
"""N-dimensional histogram over a continuous range.
This is a histogram where each axis is a continuous (non-discrete) range
with a set number of bins. The binning does not have to be evenly spaced.
Args:
axes (list): List of :py:class:`HistogramAxis` or constructor
parameters thereof. These are the axis definitions.
Keyword Args:
label (str): Label for the filled data.
title (str): Title of this histogram.
data (scalar array): N-dimensional array for the filled data.
uncert (scalar array): N-dimensional array for the uncertainty.
dtype (scalar type): Type of the data array. Input data will be
converted if different.
Example:
Typical usage would be to fill the histogram from a sample of data. In
this example, we create a 1D histogram with 100 bins from 0 to 10, and
fill it with 10k samples distributed normally around 5 with a width
(sigma) of 1::
import numpy as np
from matplotlib import pyplot
from histogram import Histogram
h = Histogram(100, [0, 10], 'x (cm)', 'counts', 'Random Distribution')
h.fill(np.random.normal(5, 1, 10000))
fig, ax = pyplot.subplots(figsize=(4, 2.5))
fig.subplots_adjust(left=.18, bottom=.2, right=.95, top=.88)
pt = ax.plothist(h, color='steelblue')
pyplot.show()
.. image:: images/histogram_1dnorm.png
"""
def __init__(self, *axes, **kwargs):
label = kwargs.pop('label' , None)
title = kwargs.pop('title' , None)
data = kwargs.pop('data' , None)
dtype = kwargs.pop('dtype' , None)
uncert = kwargs.pop('uncert', None)
if not axes:
raise TypeError('you must specify at least one axis.')
self.axes = []
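# Walk the positional arguments in a sliding 3-wide window so that each axis
# definition can consume a variable number of them: an iterable of edges
# (optionally followed by a label), an integer nbins followed by a range
# (optionally followed by a label), or a ready-made HistogramAxis. Any
# remaining bare strings become the data label and the title.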
for skip, (arg0, arg1, arg2) in skippable(window(axes, size=3)):
if (isinstance(arg0, Iterable) and
not isinstance(arg0, string_types)):
try:
arg0_array = np.asarray(arg0)
if (arg0_array.dtype == object) or (len(arg0_array.shape) != 1):
self.axes.append(HistogramAxis(*arg0))
elif isinstance(arg1, string_types):
self.axes.append(HistogramAxis(arg0_array, arg1))
skip(1)
else:
self.axes.append(HistogramAxis(arg0_array))
except ValueError:
self.axes.append(HistogramAxis(*arg0))
elif isinstance(arg0, Integral):
if isinstance(arg2, string_types):
self.axes.append(HistogramAxis(arg0, arg1, arg2))
skip(2)
else:
self.axes.append(HistogramAxis(arg0, arg1))
skip(1)
elif isinstance(arg0, HistogramAxis):
self.axes.append(arg0)
else:
assert isinstance(arg0, string_types) or arg0 is None
assert isinstance(arg1, string_types) or arg1 is None
assert arg2 is None
for a in (arg0, arg1):
if isinstance(a, string_types):
if label is None:
label = a
elif title is None:
title = a
else:
raise TypeError('bad argument list')
skip(1)
self.label = label
self.title = title
shape = tuple(ax.nbins for ax in self.axes)
if data is None:
self._data = np.zeros(shape=shape, dtype=(dtype or rc.fill_type))
else:
self._data = np.asarray(data)
if dtype is not None:
self._data = self._data.astype(dtype)
assert self._data.shape == shape, 'Data shape must match axes.'
if uncert is not None:
self.uncert = uncert
axes = []
"""`list` of `HistogramAxis` objects which detail binning along the axes.
"""
### properties
@property
def data(self):
""":py:class:`numpy.ndarray` of the filled data.
The indexes are in the same order as the :py:class:`HistogramAxis`
objects in the list stored in :py:attr:`Histogram.axes`. One can set
this directly - shape is checked and data is written "in-place" when
possible.
Here, we create a histogram and set the data directly::
from scipy import stats
from matplotlib import pyplot
from histogram import Histogram
h = Histogram(50, [0, 10])
xx, = h.grid
h.data = 1000 * stats.norm(5, 2).pdf(xx)
fig, ax = pyplot.subplots(figsize=(4, 2.5))
fig.subplots_adjust(left=.15, bottom=.2, right=.9, top=.85)
pt = ax.plothist(h, color='steelblue')
pyplot.show()
.. image:: images/histogram_data_1dnorm.png
"""
return self._data
@data.setter
def data(self, d):
self._data[...] = d
@property
def has_uncert(self):
return hasattr(self, '_uncert')
@property
def uncert(self):
""":py:class:`numpy.ndarray` of the absolute uncertainty.
This has the same shape as :py:attr:`Histogram.data`. Under certain
cases, this will be set automatically to the square-root of the data
(Poisson statistics assumption).
When histogramming a sample of randomly distributed data, the
uncertainty of each bin is the equal to the square-root of the counts
in that bin::
import numpy as np
from scipy import stats
from numpy import random as rand
from matplotlib import pyplot
from histogram import Histogram
rand.seed(1)
h = Histogram(30, [0, 10])
xx, = h.grid
h.data = 1000 * stats.norm(5, 2).pdf(xx)
h.data += rand.normal(0, 10, xx.shape)
h.uncert = np.sqrt(h.data)
fig, ax = pyplot.subplots(figsize=(4, 2.5))
fig.subplots_adjust(left=.15, bottom=.2, right=.9, top=.85)
pt = ax.plothist(h, style='errorbar')
pyplot.show()
.. image:: images/histogram_uncert_1dnorm.png
"""
return getattr(self, '_uncert', np.sqrt(self.data))
@uncert.setter
def uncert(self, u):
if u is None:
del self.uncert
else:
if not self.has_uncert:
self._uncert = np.empty(self.data.shape, dtype=np.float64)
self._uncert[...] = u
@uncert.deleter
def uncert(self):
if self.has_uncert:
del self._uncert
@property
def uncert_ratio(self):
"""The untertainty as ratios of the data"""
return self.uncert / self.data
@uncert_ratio.setter
def uncert_ratio(self, u):
"""Set the uncertainty by ratio of the data"""
self.uncert = u * self.data
@property
def title(self):
"""Title of this histogram."""
return getattr(self, '_title', None)
@title.setter
def title(self, t):
if t is None:
del self.title
else:
self._title = text_type(t)
@title.deleter
def title(self):
if hasattr(self, '_title'):
del self._title
@property
def label(self):
"""Label of the filled data in this histogram
This is usually something like "counts"."""
return getattr(self, '_label', None)
@label.setter
def label(self, l):
if l is None:
del self.label
else:
self._label = text_type(l)
@label.deleter
def label(self):
if hasattr(self, '_label'):
del self._label
def __eq__(self, that):
"""Check if data and axes are equal.
Uncertainty, labels and titles are not considered. For complete
equality, see :py:meth:`Histogram.isidentical`. For finer control of
histogram comparison, consider using :py:func:`numpy.allclose()` on the
data::
import numpy as np
from histogram import Histogram
h1 = Histogram(10, (0, 10))
h2 = Histogram(10, (0, 10))
h3 = Histogram(10, (-10, 10))
h1.data[2] = 1
h2.data[2] = 1
h3.data[3] = 1
# checks data and axes:
assert h1 == h2
assert not (h1 == h3)
# checks only data:
assert np.allclose(h1.data, h2.data)
"""
try:
if not np.allclose(self.data, that.data):
return False
for a, aa in zip(self.axes, that.axes):
if not (a == aa):
return False
except ValueError:
# histogram data shape mismatch
return False
return True
def __ne__(self, that):
return not (self == that)
def isidentical(self, that):
"""Check if histograms are identical including uncertainty and labels.
See also :py:meth:`Histogram.__eq__`."""
if not (self == that):
return False
if self.has_uncert or that.has_uncert:
if not np.allclose(self.uncert, that.uncert):
return False
if self.label != that.label:
return False
if self.title != that.title:
return False
for a, aa in zip(self.axes, that.axes):
if not a.isidentical(aa):
return False
return True
### non-modifying information getters
def __str__(self):
"""Breif string representation of the data.
Returns the string representation of the numpy array containing the
data only. Axes, uncertainty and labels are ignored.
Example::
from numpy import random as rand
from histogram import Histogram
rand.seed(1)
h = Histogram(10, [0, 10])
h.fill(rand.normal(5, 2, 10000))
print(h)
output::
[ 164 428 909 1484 1915 1934 1525 873 467 175]
"""
return str(self.data)
def __repr__(self):
"""Complete string representation of the histogram"""
fmt = 'Histogram({axes}, {args})'
axesstr = ', '.join(repr(a) for a in self.axes)
args = {
'data': repr(self.data.tolist()),
'dtype':'"{}"'.format(str(self.data.dtype)) }
if self.label is not None:
args['label'] = '"{}"'.format(self.label)
if self.title is not None:
args['title'] = '"{}"'.format(self.title)
if self.has_uncert:
args['uncert'] = str(self.uncert.tolist())
argsstr = ', '.join('{}={}'.format(k, v)
for k, v in sorted(args.items()))
return fmt.format(axes=axesstr, args=argsstr)
def __call__(self, *xx, **kwargs):
"""Value of histogram at a point
Returns the value of the histogram at a specific point ``(x, y...)`` or
array of points ``(xx, yy...)``.
Args:
xx (tuple of numbers or arrays): Point(s) inside the axes of this
histogram.
Keyword Args:
overflow_value (number): Return value when the point lies outside
this histogram. (default: 0)
Example::
from numpy import random as rand
from histogram import Histogram
rand.seed(1)
h = Histogram(10, [0, 10])
h.fill(rand.normal(5, 2, 10000))
for x in [0.5, 1.5, 2.5]:
print( h(x) )
output::
164
428
909
"""
overflow_value = kwargs.pop('overflow_value', 0)
bin = []
for x, ax in zip(xx, self.axes):
b = ax.bin(x)
if (b < 0) or (b >= ax.nbins):
return overflow_value
bin += [b]
return self.data[tuple(bin)]
def asdict(self, encoding=None, flat=False):
"""Dictionary representation of this histogram.
This includes uncertainty, axes, labels and title and is used to
serialize the histogram to NumPy's binary format (see
:py:func:`save_histogram_to_npz`).
"""
ret = {'data' : self.data}
if flat:
for i, ax in enumerate(self.axes):
for k, v in ax.asdict(encoding).items():
key = 'axes:{}:{}'.format(i, k)
ret[key] = v
else:
ret['axes'] = [a.asdict(encoding) for a in self.axes]
if self.has_uncert:
ret['uncert'] = self.uncert
if self.label is not None:
if encoding is not None:
ret['label'] = self.label.encode(encoding)
else:
ret['label'] = self.label
if self.title is not None:
if encoding is not None:
ret['title'] = self.title.encode(encoding)
else:
ret['title'] = self.title
return ret
@staticmethod
def fromdict(d, encoding=None):
"""Create new :py:class:`Histogram` from a dictionary."""
if 'axes' in d:
axes = [HistogramAxis.fromdict(a, encoding) for a in d.pop('axes')]
else:
axes = []
for i in range(d['data'].ndim):
axdict = {}
for k in ['edges', 'label']:
key = 'axes:{}:{}'.format(i, k)
if key in d:
axdict[k] = d.pop(key)
axes.append(axdict)
axes = [HistogramAxis.fromdict(a, encoding) for a in axes]
if encoding is not None:
if 'label' in d:
d['label'] = d['label'].decode(encoding)
if 'title' in d:
d['title'] = d['title'].decode(encoding)
return Histogram(*axes, **d)
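# Example round-trip through the dictionary representation (a minimal sketch):
#
#     d = h.asdict()
#     h2 = Histogram.fromdict(d)
#     assert h.isidentical(h2)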
### dimension and shape
@property
def dim(self):
"""Dimension of this histogram (number of axes)"""
return len(self.axes)
@property
def shape(self):
"""Shape of the histogram data
This is a tuple of the number of bins in each axis ``(x, y...)``. This
is the same as :py:attr:`Histogram.data.shape`.
"""
return self.data.shape
@property
def size(self):
"""Total number of bins (size of data)
This is the product of the number of bins in each axis. This is the
same as :py:meth:`Histogram.data.size`.
"""
return self.data.size
### axes information (bin edges and centers)
def isuniform(self, rtol=1e-05, atol=1e-08):
"""Check if all axes are uniform
Returns "and" of :py:meth:`HistogramAxis.isuniform` for each axis.
"""
return all([ax.isuniform(rtol=rtol, atol=atol) for ax in self.axes])
@property
def edges(self):
"""Edges of each axis as a tuple
Output is in the form::
( [x0, x1..], [y0, y1...] ... )
"""
return tuple([ax.edges for ax in self.axes])
@property
def grid(self):
"""Meshgrid of the.bincenters() of each axis
This is a single array for 1D histograms - i.e. the bin-centers of the
x-axis. For 2D histograms, this is a tuple of two 2D arrays::
XX, YY = h2.grid
Here, ``XX`` and ``YY`` are arrays of shape ``(xbins, ybins)``. For 1D
histograms, the output is still a tuple so typically, one should expand
this out with a comma::
xx, = h1.grid
"""
if self.dim == 1:
return (self.axes[0].bincenters(), )
else:
centers = [ax.bincenters() for ax in self.axes]
return np.meshgrid(*centers, indexing='ij')
def edge_grid(self):
"""Meshgrid built from the axes' edges
This is the same as :py:meth:`Histogram.grid` but for the edges of each
axis instead of the bin centers.
"""
if self.dim == 1:
return (self.axes[0].edges, )
else:
edges = [ax.edges for ax in self.axes]
return np.meshgrid(*edges, indexing='ij')
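    # Shape sketch for grid()/edge_grid() (assumes a 2D histogram with 3 and 4
    # bins on the x and y axes respectively):
    #
    #     h2 = Histogram(3, [0, 3], 4, [0, 4])
    #     XX, YY = h2.grid()           # bin centers, each of shape (3, 4)
    #     EX, EY = h2.edge_grid()      # bin edges, each of shape (4, 5)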
def binwidths(self):
"""Widths of all bins along each axis
This will always return a tuple::
dx, dy = h2.binwidths()
Here, ``dx`` and ``dy`` are arrays of the widths of each bin along the
        `x` and `y` axes respectively. For 1D histograms, the output is still
a tuple so typically, one should expand this out with a comma::
dx, = h1.binwidths()
"""
return tuple([ax.binwidths() for ax in self.axes])
def binwidth(self, b=1, axis=0):
"""Width of a specific bin ``b`` along an axis
Args:
b (int): Bin index (from zero, default: 1).
axis (int): Axis index (default: 0).
Note:
Default is the second bin (index = 1) in the first (index = 0) axis.
"""
return self.axes[axis].binwidth(b)
def binvolumes(self):
"""Volumes of each bin
        Volume is defined as the product of the bin-widths along each axis
        for the given bin. For 1D histograms, this is the same as
        :py:meth:`Histogram.binwidths`. For 2D histograms, this returns a 2D
        array like the following, where ``dxi`` is the width of the i-th bin
        along the x-axis (first, index = 0)::
[ [ dx0*dy0, dx0*dy1 ... ],
[ dx1*dy0, dx1*dy1 ... ],
... ] = h2.binvolumes()
h.binvolumes()[i, j] = dxi * dyj
"""
widths = self.binwidths()
if self.dim == 1:
return widths
else:
return np.multiply.reduce(np.ix_(*widths))
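    # Numeric sketch for binvolumes() (assumes two uniform axes of unit-width
    # bins):
    #
    #     h2 = Histogram(2, [0, 2], 3, [0, 3])
    #     h2.binvolumes()              # (2, 3) array of ones, dxi * dyj == 1.0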
@property
def overflow_value(self):
"""Guaranteed overflow point when filling this histogram
For 1D histograms, this is a tuple of one value ``(x, )`` generated by
:py:attr:`HistogramAxis.overflow_value`. For 2D histograms, this will
look like ``(x, y)``.
Example::
from histogram import Histogram
ha = Histogram(10, [0, 10])
            print(ha)
            ha.fill(ha.overflow_value)
            print(ha)
Output::
[0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0]
"""
return tuple(ax.overflow_value for ax in self.axes)
### data information (limits, sum, extent)
def sum_data(self, *axes):
"""Sum of bin values or sum along one or more axes."""
all_axes = tuple(range(self.dim))
axes = all_axes if len(axes) == 0 else tuple(sorted(axes))
if not self.has_uncert and axes == all_axes:
s = self.data.sum()
result = ufloat(s, np.sqrt(s))
else:
result = np.sum(unp.uarray(self.data, self.uncert), axis=axes)
return result
def sum(self, *axes):
"""Sum of bin values or sum along one or more axes.
Args:
axes (tuple of integers, optional): Axes to sum over.
Returns the sum over all values or sums only over specific axes and
returns a new :py:class:`Histogram` with reduced dimension.
Example::
from histogram import Histogram
h = Histogram(10, [0, 10])
h.fill([1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
print('sum of h:', h.sum())
h2 = Histogram(10, [0, 10], 10, [0, 10])
h2.fill([1, 2, 2, 3, 3, 3, 4, 4, 4, 4],
[1, 1, 1, 1, 1, 1, 2, 2, 2, 2])
print('h2 sum along axis 0:', h2.sum(0))
Output::
sum of h: 10
h2 sum along axis 0: [0 6 4 0 0 0 0 0 0 0]
"""
all_axes = tuple(range(self.dim))
axes = all_axes if len(axes) == 0 else tuple(sorted(axes))
if axes == all_axes:
return self.sum_data()
else:
if self.has_uncert:
result = self.sum_data(*axes)
newdata = unp.nominal_values(result)
newuncert = unp.std_devs(result)
else:
newdata = np.sum(self.data, axis=axes)
newuncert = None
ii = sorted(set(range(self.dim)) - set(axes))
newaxes = [self.axes[i] for i in ii]
return Histogram(*newaxes, data=newdata, uncert=newuncert,
title=copy(self.title), label=copy(self.label))
def projection_data(self, axis):
"""Projection of the data onto an axis."""
sumaxes = set(range(self.dim)) - {axis}
return self.sum_data(*sumaxes)
def projection(self, axis):
"""Projection onto a single axis."""
sumaxes = set(range(self.dim)) - {axis}
return self.sum(*sumaxes)
def integral(self):
"""Total volume-weighted sum of the histogram."""
res = np.sum(unp.uarray(self.data, self.uncert) * self.binvolumes())
return nominal_value(res), std_dev(res)
def min(self):
"""Minimum value of the filled data including uncertainty."""
return np.nanmin(self.data - self.uncert)
def max(self):
"""Maximum value of the filled data including uncertainty."""
return np.nanmax(self.data + self.uncert)
def mean(self):
"""Mean position of the data along the axes
Returns:
tuple: Mean (float) along each axis: ``(xmean, ymean...)``.
Bin-centers are used as the position and non-equal widths are
incorporated into the weighting of the results.
"""
mean = []
for i, axis in enumerate(self.axes):
if self.dim > 1:
w = self.projection_data(i)
else:
w = unp.uarray(self.data, self.uncert)
if axis.isuniform():
x = unp.uarray(axis.bincenters(), 0.5 * axis.binwidth())
else:
bw = axis.binwidths()
x = unp.uarray(axis.bincenters(), 0.5 * bw)
w *= bw
mean.append(np.sum(x * w) / np.sum(w))
return tuple(mean)
def var(self):
"""Variance of the data along the axes
Returns:
tuple: Variance (float) along each axis: ``(xvar, yvar...)``.
This will ignore all ``nan`` and ``inf`` values in the histogram.
Bin-centers are used as the position and non-equal widths are
incorporated into the weighting of the results.
"""
var = []
for i, (axis, mean) in enumerate(zip(self.axes, self.mean())):
if self.dim > 1:
w = self.projection_data(i)
else:
w = unp.uarray(self.data, self.uncert)
if axis.isuniform():
x = unp.uarray(axis.bincenters(), 0.5 * axis.binwidth())
else:
bw = axis.binwidths()
x = unp.uarray(axis.bincenters(), 0.5 * bw)
w *= bw
sum_w = np.sum(w)
mean = np.sum(w * x) / sum_w
var.append(np.sum(w * (x - mean)**2) / sum_w)
return tuple(var)
def std(self):
"""Standard deviation of the data along the axes
Returns:
tuple: Standard deviation (float) along each axis: ``(xstd,
ystd...)``.
"""
        var = [ufloat(x.n, x.s) for x in self.var()]
return tuple(unp.sqrt(var))
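    # Usage sketch for mean()/var()/std() (assumes a 1D histogram filled from
    # a normal distribution, as in the fill examples in this class):
    #
    #     h = Histogram(30, [0, 10])
    #     h.fill(np.random.normal(5, 1, 10000))
    #     (xmean,), (xstd,) = h.mean(), h.std()
    #     # both carry uncertainties; xmean is near 5 and xstd near 1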
def extent(self, maxdim=None, uncert=True, pad=None):
"""Extent of axes and data
Returns:
tuple: extent like ``(xmin, xmax, ymin ...)``.
By default, this includes the uncertainty if the last dimension is the
histogram's data and not an axis::
[xmin, xmax, ymin, ymax, ..., min(data-uncert), max(data+uncert)]
padding is given as a percent of the actual extent of the axes or data
(plus uncertainty) and can be either a single floating point number or
a list of length ``2 * maxdim``.
"""
if maxdim is None:
maxdim = self.dim + 1
ext = []
for ax in self.axes[:maxdim]:
ext += [ax.min, ax.max]
if len(ext) < (2*maxdim):
if uncert:
ext += [self.min(), self.max()]
else:
ext += [np.nanmin(self.data), np.nanmax(self.data)]
if pad is not None:
if not isinstance(pad, Iterable):
pad = [pad]*(2*maxdim)
for dim in range(maxdim):
a, b = 2*dim, 2*dim+1
w = ext[b] - ext[a]
ext[a] -= pad[a] * w
ext[b] += pad[b] * w
return tuple(ext)
def errorbars(self, maxdim=None, asratio=False):
"""Bin half-widths and data uncertainties."""
if maxdim is None:
maxdim = self.dim + 1
ret = [0.5 * ax.binwidths() for ax in self.axes[:maxdim]]
if len(ret) < maxdim:
ret += [self.uncert]
if asratio:
for x, ax in zip(ret, self.axes):
x /= ax.range
if maxdim > self.dim:
ret[-1] /= self.data.max() - self.data.min()
return ret
def asline(self):
"""Points describing this histogram as a line."""
assert self.dim == 1, 'only 1D histograms can be translated into a line.'
x = self.axes[0].edges
y = self.data
xx = np.column_stack([x[:-1], x[1:]]).ravel()
yy = np.column_stack([y, y]).ravel()
extent = [min(xx), max(xx), min(yy), max(yy)]
return xx, yy, extent
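    # Plotting sketch for asline() (assumes a 1D histogram and a matplotlib
    # axes object ``ax``):
    #
    #     xx, yy, ext = h.asline()
    #     ax.plot(xx, yy)              # step-style outline of the bin contents
    #     ax.set_xlim(ext[0], ext[1])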
### self modifying methods (set, fill)
def __getitem__(self, *args):
"""Direct access to the filled data."""
return self.data.__getitem__(*args)
def __setitem__(self, *args):
"""Direct access to the filled data."""
return self.data.__setitem__(*args)
def set(self, val, uncert=None):
"""Set filled data to specific values.
This will set the uncertainty to ``None`` by default and will accept a
single value or an array the same shape as the data. Data will be cast
to the data type already stored in the histogram.
"""
if isinstance(val, np.ndarray):
self.data.T[...] = val.T
else:
self.data[...] = val
if uncert is None:
del self.uncert
else:
if not self.has_uncert:
self._uncert = np.empty(self.data.shape)
if isinstance(uncert, np.ndarray):
self.uncert.T[...] = uncert.T
else:
self.uncert[...] = uncert
def set_nans(self, val=0, uncert=0):
"""Set all NaNs to a specific value."""
self.data[np.isnan(self.data)] = val
if self.has_uncert:
self.uncert[np.isnan(self.uncert)] = uncert
def set_infs(self, val=0, uncert=0):
"""Set all infinity values to a specific value."""
self.data[np.isinf(self.data)] = val
if self.has_uncert:
self.uncert[np.isinf(self.uncert)] = uncert
def set_nonfinites(self, val=0, uncert=0):
"""Set all non-finite values to a specific value."""
self.data[~np.isfinite(self.data)] = val
if self.has_uncert:
self.uncert[~np.isfinite(self.uncert)] = uncert
def reset(self):
"""Set data to zero and uncertainty to `None`."""
self.set(0)
self.uncert = None
def fill(self, *args):
"""Fill histogram with sample data.
Arguments (``\*args``) are the sample of data with optional associated
weights. Weights may be a single number or an array of length `N`. The
default (``None``) is equivalent to ``weights=1``. Example::
from histogram import Histogram
### 1D Example
h = Histogram(10, [0, 10])
h.fill(1) # single value
h.fill(2, 2) # single value with weight
h.fill([3, 3, 3]) # data sample
h.fill([4, 4], 2) # data sample with constant weight
h.fill([5, 5], [2, 3]) # data sample with variable weights
print(h)
# output:
# [0 1 2 3 4 5 0 0 0 0]
### 2D Example
h = Histogram(3, [0, 3], 10, [0, 10])
xdata = [0, 0, 1, 1, 2]
ydata = [1, 2, 3, 4, 5]
weights = [1, 2, 3, 4, 5]
h.fill(xdata, ydata, weights)
print(h)
# output:
# [[0 1 2 0 0 0 0 0 0 0]
# [0 0 0 3 4 0 0 0 0 0]
# [0 0 0 0 0 5 0 0 0 0]]
"""
if len(args) > self.dim:
sample = args[:-1]
weights = args[-1]
else:
sample = args
weights = None
self.fill_from_sample(sample, weights)
def fill_one(self, pt, wt=1):
"""Fill a single data point
This increments a single bin by weight ``wt``. While it is the fastest
method for a single entry, it should only be used as a last resort
        because it's at least an order of magnitude slower than
:py:meth:`Histogram.fill_from_sample` when filling many entries.
"""
try:
if pt < self.axes[0].min or self.axes[0].max < pt:
return
self.data[self.axes[0].bin(pt)] += wt
except ValueError:
b = []
for x, ax, m in zip(pt, self.axes, self.data.shape):
if x < ax.min or ax.max < x:
return
b += [ax.bin(x)]
self.data[tuple(b)] += wt
def fill_from_sample(self, sample, weights=None):
"""Fill histogram from sample of data
This fills the histogram from sample with shape `(D, N)` array where `D`
is the dimension of the histogram and `N` is the number of points to
fill. The optional ``weights`` may be a single number or an array of
length `N`. The default (``None``) is equivalent to ``weights = 1``.
This is the primary work-horse of the :py:class:`Histogram` class and
should be favored, along with the wrapper method
:py:meth:`Histogram.fill` over :py:meth:`Histogram.fill_one`.
Example::
from numpy import random as rand
from matplotlib import pyplot
from histogram import Histogram
rand.seed(1)
            ### 2D Example
h = Histogram(10, [0, 10], 30, [0, 10])
h.fill_from_sample(rand.normal(5, 1, (2, 20000)))
fig, ax = pyplot.subplots(figsize=(4, 2.5))
pt = ax.plothist(h)
fig.tight_layout()
pyplot.show()
.. image:: images/histogram_fill_from_sample.png
"""
if not isinstance(sample, np.ndarray):
sample = np.array(sample)
wt = weights
if weights is not None:
if not isinstance(weights, Iterable):
wt = np.empty((sample.T.shape[0], ))
wt[...] = weights
h, e = np.histogramdd(sample.T, self.edges, weights=wt)
self.data += h.astype(self.data.dtype)
### operations
def __deepcopy__(self, memo=None):
"""Create a complete copy of this histogram."""
return self.copy()
def __copy__(self):
"""Create a complete copy of this histogram."""
return self.copy()
def copy(self, dtype=None, **kwargs):
"""Copy this histogram optionally changing dtype and labels."""
cls = self.__class__
newhist = cls.__new__(cls)
newhist.axes = [deepcopy(ax) for ax in self.axes]
if dtype is None:
newhist._data = deepcopy(self._data)
else:
newhist._data = self._data.astype(dtype)
newhist.title = kwargs.get('title', deepcopy(self.title))
newhist.label = kwargs.get('label', deepcopy(self.label))
if self.has_uncert:
newhist._uncert = copy(self._uncert)
return newhist
def __iadd__(self, that):
"""In-place addition."""
if isinstance(that, Histogram):
if self.has_uncert or that.has_uncert:
self_data = unp.uarray(self.data, self.uncert)
that_data = unp.uarray(that.data, that.uncert)
self_data.T[...] += that_data.T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
else:
self.data.T[...] += that.data.T
else:
self_data = unp.uarray(self.data, self.uncert)
self_data.T[...] += np.asarray(that).T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
return self
def __radd__(self, that):
"""Commuting addition."""
return self + that
def __add__(self, that):
"""Addition."""
if isinstance(that, Histogram) and self.dim < that.dim:
return that + self
else:
if isinstance(that, Histogram):
that_dtype = that.data.dtype
else:
that_dtype = np.dtype(type(that))
copy_dtype = None
if self.has_uncert or (isinstance(that, Histogram) and
that.has_uncert):
copy_dtype = np.float64
elif self.data.dtype != that_dtype:
inttypes = [np.int32, np.int64]
if (self.data.dtype in inttypes) and \
(that_dtype in inttypes):
copy_dtype = np.int64
else:
copy_dtype = np.float64
ret = self.copy(copy_dtype, label=None)
ret += that
return ret
def __isub__(self, that):
"""In-place subtraction."""
if isinstance(that, Histogram):
self_data = unp.uarray(self.data, self.uncert)
that_data = unp.uarray(that.data, that.uncert)
self_data.T[...] -= that_data.T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
else:
self_data = unp.uarray(self.data, self.uncert)
self_data.T[...] -= np.asarray(that).T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
return self
def __rsub__(self, that):
"""Commuting subtraction."""
ret = self.copy(np.float64, label=None)
ret.data.T[...] = unp.nominal_values(that).T
ret._uncert = np.empty(shape=ret.data.shape)
ret._uncert.T[...] = unp.std_devs(that).T
ret -= self
return ret
def __sub__(self, that):
"""Subtraction."""
ret = self.copy(np.float64, label=None)
ret -= that
return ret
def __imul__(self, that):
"""In-place multiplication."""
if isinstance(that, Histogram):
self_data = unp.uarray(self.data, self.uncert)
that_data = unp.uarray(that.data, that.uncert)
self_data.T[...] *= that_data.T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
else:
self_data = unp.uarray(self.data, self.uncert)
self_data.T[...] *= np.asarray(that).T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
return self
def __rmul__(self, that):
"""Commuting mulitplication."""
return self * that
def __mul__(self, that):
"""Multiplication."""
ret = self.copy(np.float64, label=None)
ret *= that
return ret
def __itruediv__(self, that):
"""In-place (true) division."""
if isinstance(that, Histogram):
infs = np.isclose(that.data, 0)
nans = np.isclose(self.data, 0) & infs
ninfs = (self.data < 0) & infs
sel = ~(infs | nans)
self_data = unp.uarray(self.data[sel], self.uncert[sel])
that_data = unp.uarray(that.data[sel], that.uncert[sel])
self_data.T[...] /= that_data.T
self.data[sel] = unp.nominal_values(self_data)
self.uncert[sel] = unp.std_devs(self_data)
self.data[infs] = np.inf
self.data[ninfs] = -np.inf
self.data[nans] = np.nan
self.uncert[infs | nans] = np.nan
else:
self_data = unp.uarray(self.data, self.uncert)
self_data.T[...] /= np.asarray(that).T
self.data[...] = unp.nominal_values(self_data)
self.uncert = unp.std_devs(self_data)
return self
def __rtruediv__(self, that):
"""Commuting (true) division.
that = 1.
hret = that / hself
"""
ret = self.copy(np.float64, label=None)
ret.data.T[...] = unp.nominal_values(that).T
ret._uncert = np.empty(shape=ret.data.shape)
ret._uncert.T[...] = unp.std_devs(that).T
ret /= self
return ret
def __truediv__(self, that):
"""Division."""
ret = self.copy(np.float64)
ret /= that
return ret
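    # Arithmetic sketch: the operators above promote bin contents to
    # uncertainty-carrying arrays when needed (assumes two compatible 1D
    # histograms h1 and h2):
    #
    #     h3 = h1 + h2        # summed counts, uncertainties combined if present
    #     r = h1 / h2         # float64 copy; 0/0 bins -> nan, x/0 bins -> +/-inf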
### interpolating and smoothing
def interpolate_nonfinites(self, method='cubic', **kwargs):
"""Replace non-finite bins with interpolated values.
Keyword Args:
method (str): passed directly to
:py:func:`scipy.interpolate.griddata` and controls the method
of interpolation used.
**kwargs: Passed directly to :py:func:`scipy.interpolate.griddata`.
This modifies the histogram, changing the data in-place. Bins are
considered non-finite if the filled value or the uncertainty is ``nan``
or ``inf``.
"""
if not issubclass(self.data.dtype.type, Integral):
finite = np.isfinite(self.data).ravel()
if self.has_uncert:
finite &= np.isfinite(self.uncert).ravel()
if not all(finite):
g = self.grid()
points = np.vstack(g).reshape(self.dim, -1).T
values = self.data.ravel()
self.data[...] = interpolate.griddata(
points[finite],
values[finite],
points,
method=method,
**kwargs).reshape(self.shape)
if self.has_uncert:
values = self.uncert.ravel()
self.uncert[...] = interpolate.griddata(
points[finite],
values[finite],
points,
method=method,
**kwargs).reshape(self.shape)
def smooth(self, weight=0.5, sigma=1, mode='nearest', **kwargs):
"""Smooth the histogram using a Gaussian filter.
Keyword Args:
weight (float [0, 1]): Linear weighting for Gaussian filter. A
value of 1 will replace the data with the actual result from
:py:func:`scipy.ndimage.filters.gaussian_filter`.
sigma (float or sequence of floats): Passed directly to
:py:func:`scipy.ndimage.filters.gaussian_filter`.
mode (str): Passed directly to
:py:func:`scipy.ndimage.filters.gaussian_filter`.
**kwargs: Passed directly to
:py:func:`scipy.ndimage.filters.gaussian_filter`.
All non-finite bins are filled using
        :py:meth:`Histogram.interpolate_nonfinites` before the Gaussian filter is
applied. If the underlying data is of integral type, it will be
converted to `numpy.float64` before the filter is applied.
"""
self.interpolate_nonfinites()
if issubclass(self.data.dtype.type, Integral):
self._data = self.data.astype(np.float64)
Zf = ndimage.filters.gaussian_filter(self.data, sigma=sigma, mode=mode,
**kwargs)
self.data = weight * Zf + (1. - weight) * self.data
if self.has_uncert:
Uf = ndimage.filters.gaussian_filter(self.uncert, sigma=sigma,
mode=mode, **kwargs)
self.uncert = weight * Uf + (1. - weight) * self.uncert
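    # Smoothing sketch (gaussian_filter is used above): a weight of 0.3 keeps
    # 70% of the original bin contents and blends in 30% of the filtered
    # values.
    #
    #     h.smooth(weight=0.3, sigma=2)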
### slicing and shape changing
def slices_data(self, axis=0):
"""Iterable over the data along specified axis."""
return np.rollaxis(self.data, axis)
def slices_uncert(self, axis=0):
"""Iterable over the uncertainty along specified axis."""
uncert = self.uncert
return np.rollaxis(uncert, axis)
def slices(self, axis=0):
"""Generator of histograms along specified axis."""
if self.has_uncert:
uncert_slices = self.slices_uncert(axis)
else:
uncert_slices = [None]*self.axes[axis].nbins
for d, u in zip(self.slices_data(axis), uncert_slices):
yield Histogram(
*[a for i, a in enumerate(self.axes) if i != axis],
data=d,
uncert=u,
title=self.title,
label=self.label)
def rebin(self, nbins=2, axis=0, snap='low', clip=True):
"""Create a new histogram with merged bins along an axis.
Keyword Args:
nbins (int): Number of bins to merge.
axis (int): Axis along which to merge bins.
snap (str): Controls edge behavior if `nbins` does not evenly
divide the number of bins in this `axis`.
            clip (bool): Whether or not to include the non-uniform bin in the
                case that `nbins` does not evenly divide the number of bins in
this `axis`.
"""
axnew = [ax.mergebins(nbins, snap, clip) if i == axis else ax
for i, ax in enumerate(self.axes)]
if self.has_uncert:
x = unp.uarray(self.data.astype(np.float64), copy(self.uncert))
else:
x = self.data.copy()
x = np.rollaxis(x, axis, 0)
a = x.shape[0]
d, r = divmod(a, nbins)
shp = [d, nbins]
if len(x.shape) > 1:
shp += list(x.shape[1:])
if r == 0:
x = x.reshape(shp)
else:
if not clip:
shp[0] = shp[0] + r
zeros = np.zeros([nbins - r] + list(x.shape[1:]))
if snap == 'low':
x = np.concatenate((x, zeros))
else:
x = np.concatenate((zeros, x))
x = np.resize(x, shp)
x = x.sum(1)
x = np.rollaxis(x, 0, axis + 1)
if self.has_uncert:
data = unp.nominal_values(x)
uncert = unp.std_devs(x)
else:
data = x
uncert = None
return Histogram(*axnew, title=self.title, label=self.label,
data=data, uncert=uncert)
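    # Rebinning sketch: merging pairs of bins along the x axis of a 10-bin
    # histogram yields 5 bins (assumes nbins evenly divides the axis):
    #
    #     h = Histogram(10, [0, 10])
    #     h5 = h.rebin(nbins=2, axis=0)     # h5.shape == (5,)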
def cut(self, *args, **kwargs):
"""Truncate a histogram along one or more axes.
To cut (truncate) a one dimensional histogram from 0 to 5,
each of the following are valid. This will also cut an
ND histogram along the first (x) axis::
hcut = h.cut(0, 5)
hcut = h.cut((0, 5))
hcut = h.cut(0, 5, axis=0)
hcut = h.cut((0, 5), axis=0)
To cut along the second (y) axis, you can do the following::
hcut = h.cut(0, 5, axis=1)
hcut = h.cut((0, 5), axis=1)
hcut = h.cut(None, (0, 5))
hcut = h.cut((None, None), (0, 5))
To cut only on one side, `None` may be used to indicate
+/- infinity. This makes a lower-bound type cut at 0::
hcut = h.cut(0, None)
Finally, to cut on multiple dimensions at once, the cut
ranges can be strung together. These examples cut
along the first axis (x) from 0 to 5 and along the second
axis (y) from 1 to 6::
hcut = h.cut(0, 5, 1, 6)
hcut = h.cut((0, 5), (1, 6))
The first example above is useful for cutting 2D histogram
using extent lists as used in other libraries like
matplotlib::
hcut = h.cut(*ax.extent())
where, for example, `ax.extent()` returns the extent in
the form::
[xmin, xmax, ymin, ymax]
"""
axis = kwargs.pop('axis', None)
rng = []
for a in args:
if isinstance(a, Iterable):
rng += a
                if len(a) == 1:
rng += [None]
else:
rng += [a]
if (len(rng) % 2) == 1:
rng.append(None)
rng = np.asarray(rng)
rng.shape = (-1, 2)
if axis is not None:
inrng = copy(rng)
rng = [[None, None] for _ in range(self.dim)]
rng[axis] = inrng[0]
rng = np.asarray(rng)
newaxes = []
newdata = copy(self.data)
if self.has_uncert:
newuncert = copy(self.uncert)
else:
newuncert = None
for i, (r, ax) in enumerate(zip(rng, self.axes)):
xlow, xhigh = r
if (xlow is None) and (xhigh is None):
newaxes += [ax.copy()]
else:
a, m = ax.cut(xlow, xhigh, ('nearest', 'nearest'))
indices = np.argwhere(m)[:, 0]
newaxes += [a]
newdata = newdata.take(indices, i)
if newuncert is not None:
newuncert = newuncert.take(indices, i)
return Histogram(*newaxes,
                         data=newdata,
                         uncert=newuncert,
                         title=kwargs.get('title', copy(self.title)),
                         label=kwargs.get('label', copy(self.label)))
def occupancy(self, bins=100, limits=None, **kwargs):
"""Histogram the filled data of this histogram
Returns a new histogram showing the occupancy of the data. This is
effectively histograming the data points, ignoring the axes and
        uncertainties.
"""
nans = np.isnan(self.data)
if limits is None:
limits = [self[~nans].min(), self[~nans].max()]
ret = Histogram(bins, limits, **kwargs)
ret.fill_from_sample(self[~nans].ravel())
return ret
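    # Occupancy sketch: histogram the bin contents themselves, ignoring axes
    # and uncertainties (the limits below are illustrative):
    #
    #     occ = h.occupancy(bins=50, limits=[0, 200])
    #     # occ is a new 1D Histogram of how often each count value occurs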
### curve fitting
def fit(self, fcn, p0, **kwargs):
"""Fit a function to the histogram
Fits the function ``fcn`` to the histogram, returning estimated
parameters, their covariance matrix and a tuple containing the
specified test result (chi-square test is default).
"""
test = str(kwargs.pop('test', 'chisquare')).lower()
uncert = kwargs.pop('uncert', self.uncert)
sel = kwargs.pop('sel', np.ones(self.data.shape, dtype=bool))
if 'sigma' in kwargs:
raise ValueError('"sigma" keyword not valid, use "uncert".')
if 'absolute_sigma' in kwargs:
raise ValueError('"absolute_sigma" ignored (considered True).')
# initial parameters
if hasattr(p0, '__call__'):
kwargs['p0'] = p0(self)
else:
kwargs['p0'] = copy(p0)
npar = len(kwargs['p0'])
# data selection
sel &= np.isfinite(self.data)
xx = self.grid()
for x in xx:
sel &= np.isfinite(x)
if uncert is not None:
# consider only finite uncertainties
sel &= np.isfinite(uncert)
if np.allclose(uncert[0], uncert):
# uncertainties are the same value
uncert = None
else:
# throw out uncertainties equal to zero
sel &= ~ np.isclose(uncert, 0)
if np.count_nonzero(sel) < npar:
raise ValueError('Not enough data.')
# make selection on grid
xx = np.squeeze(tuple(x[sel].astype(np.float64) for x in xx))
# make selection on data at grid points
yy = self.data[sel].astype(np.float64)
# make selection on uncertainty
if uncert is not None:
kwargs['sigma'] = uncert[sel].astype(np.float64)
kwargs['absolute_sigma'] = True
# perform the fit
pfit, pcov = opt.curve_fit(fcn, xx, yy, **kwargs)
if not isinstance(pcov, np.ndarray) or np.isinf(pcov).any():
raise RuntimeError('Bad fit.')
### perform goodness of fit test
if test != 'none':
try:
N = len(xx)
except:
N = 1
m = npar
ndf = N - m
yyfit = fcn(xx, *pfit)
dyy = yy - yyfit
# nchar is the minimum number of characters
# used to disambiguate goodness of fit tests.
# at the moment, one letter is sufficient.
nchar = 1
if test[:nchar] == 'kstest'[:nchar]:
# two-sided Kolmogorov-Smirov test
D, pval = stats.kstest(dyy,
stats.norm(0, dyy.std()).cdf)
ptest = (D, pval)
elif test[:nchar] == 'shapiro'[:nchar]:
# Shapiro-Wilk test
W, pval = stats.shapiro(dyy)
ptest = (W, pval)
else: # test[:nchar] == 'chisquare'[:nchar]:
# simple Chi-squared test
chisq, pval = stats.chisquare(yy, yyfit, len(pfit))
ptest = (chisq/ndf, pval)
return pfit, pcov, ptest
return pfit, pcov
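    # Fitting sketch (curve_fit is used above via ``opt.curve_fit``; the model
    # function below is illustrative, assuming a roughly Gaussian 1D
    # histogram):
    #
    #     def gauss(x, A, mu, sigma):
    #         return A * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    #
    #     pfit, pcov, (chi2_ndf, pval) = h.fit(gauss, p0=[1., 5., 1.])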
| gpl-3.0 |
DGrady/pandas | pandas/plotting/_compat.py | 11 | 1602 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
from distutils.version import LooseVersion
def _mpl_le_1_2_1():
try:
import matplotlib as mpl
return (str(mpl.__version__) <= LooseVersion('1.2.1') and
str(mpl.__version__)[0] != '0')
except ImportError:
return False
def _mpl_ge_1_3_1():
try:
import matplotlib
# The or v[0] == '0' is because their versioneer is
# messed up on dev
return (matplotlib.__version__ >= LooseVersion('1.3.1') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_4_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.4') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_5_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.5') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_2_0_0():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0')
except ImportError:
return False
def _mpl_le_2_0_0():
try:
import matplotlib
return matplotlib.compare_versions('2.0.0', matplotlib.__version__)
except ImportError:
return False
def _mpl_ge_2_0_1():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0.1')
except ImportError:
return False
| bsd-3-clause |
gfyoung/pandas | pandas/tests/series/indexing/test_getitem.py | 1 | 16680 | """
Series.__getitem__ test classes are organized by the type of key passed.
"""
from datetime import date, datetime, time
import numpy as np
import pytest
from pandas._libs.tslibs import conversion, timezones
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
class TestSeriesGetitemScalars:
def test_getitem_negative_out_of_bounds(self):
ser = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
ser[-11]
def test_getitem_out_of_bounds_indexerror(self, datetime_series):
# don't segfault, GH#495
msg = r"index \d+ is out of bounds for axis 0 with size \d+"
with pytest.raises(IndexError, match=msg):
datetime_series[len(datetime_series)]
def test_getitem_out_of_bounds_empty_rangeindex_keyerror(self):
# GH#917
# With a RangeIndex, an int key gives a KeyError
ser = Series([], dtype=object)
with pytest.raises(KeyError, match="-1"):
ser[-1]
def test_getitem_keyerror_with_int64index(self):
ser = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
with pytest.raises(KeyError, match=r"^5$"):
ser[5]
with pytest.raises(KeyError, match=r"^'c'$"):
ser["c"]
# not monotonic
ser = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
with pytest.raises(KeyError, match=r"^5$"):
ser[5]
with pytest.raises(KeyError, match=r"^'c'$"):
ser["c"]
def test_getitem_int64(self, datetime_series):
idx = np.int64(5)
assert datetime_series[idx] == datetime_series[5]
# TODO: better name/GH ref?
def test_getitem_regression(self):
ser = Series(range(5), index=list(range(5)))
result = ser[list(range(5))]
tm.assert_series_equal(result, ser)
# ------------------------------------------------------------------
# Series with DatetimeIndex
@pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"])
def test_getitem_pydatetime_tz(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
index = date_range(
start="2012-12-24 16:00", end="2012-12-24 18:00", freq="H", tz=tzstr
)
ts = Series(index=index, data=index.hour)
time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr)
dt = datetime(2012, 12, 24, 17, 0)
time_datetime = conversion.localize_pydatetime(dt, tz)
assert ts[time_pandas] == ts[time_datetime]
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_string_index_alias_tz_aware(self, tz):
rng = date_range("1/1/2000", periods=10, tz=tz)
ser = Series(np.random.randn(len(rng)), index=rng)
result = ser["1/3/2000"]
tm.assert_almost_equal(result, ser[2])
def test_getitem_time_object(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = Series(np.random.randn(len(rng)), index=rng)
mask = (rng.hour == 9) & (rng.minute == 30)
result = ts[time(9, 30)]
expected = ts[mask]
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Series with CategoricalIndex
def test_getitem_scalar_categorical_index(self):
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
ser = Series([1, 2], index=cats)
expected = ser.iloc[0]
result = ser[cats[0]]
assert result == expected
def test_getitem_str_with_timedeltaindex(self):
rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
ser = Series(np.arange(len(rng)), index=rng)
key = "6 days, 23:11:12"
indexer = rng.get_loc(key)
assert indexer == 133
result = ser[key]
assert result == ser.iloc[133]
msg = r"^Timedelta\('50 days 00:00:00'\)$"
with pytest.raises(KeyError, match=msg):
rng.get_loc("50 days")
with pytest.raises(KeyError, match=msg):
ser["50 days"]
class TestSeriesGetitemSlices:
def test_getitem_partial_str_slice_with_datetimeindex(self):
# GH#34860
arr = date_range("1/1/2008", "1/1/2009")
ser = arr.to_series()
result = ser["2008"]
rng = date_range(start="2008-01-01", end="2008-12-31")
expected = Series(rng, index=rng)
tm.assert_series_equal(result, expected)
def test_getitem_slice_strings_with_datetimeindex(self):
idx = DatetimeIndex(
["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"]
)
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts["1/2/2000":]
expected = ts[1:]
tm.assert_series_equal(result, expected)
result = ts["1/2/2000":"1/3/2000"]
expected = ts[1:4]
tm.assert_series_equal(result, expected)
def test_getitem_slice_2d(self, datetime_series):
# GH#30588 multi-dimensional indexing deprecated
with tm.assert_produces_warning(FutureWarning):
# GH#30867 Don't want to support this long-term, but
# for now ensure that the warning from Index
# doesn't comes through via Series.__getitem__.
result = datetime_series[:, np.newaxis]
expected = datetime_series.values[:, np.newaxis]
tm.assert_almost_equal(result, expected)
# FutureWarning from NumPy.
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_getitem_median_slice_bug(self):
index = date_range("20090415", "20090519", freq="2B")
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
with tm.assert_produces_warning(FutureWarning):
# GH#31299
result = s[indexer]
expected = s[indexer[0]]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"slc, positions",
[
[slice(date(2018, 1, 1), None), [0, 1, 2]],
[slice(date(2019, 1, 2), None), [2]],
[slice(date(2020, 1, 1), None), []],
[slice(None, date(2020, 1, 1)), [0, 1, 2]],
[slice(None, date(2019, 1, 1)), [0]],
],
)
def test_getitem_slice_date(self, slc, positions):
# https://github.com/pandas-dev/pandas/issues/31501
ser = Series(
[0, 1, 2],
DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
)
result = ser[slc]
expected = ser.take(positions)
tm.assert_series_equal(result, expected)
def test_getitem_slice_float_raises(self, datetime_series):
msg = (
"cannot do slice indexing on DatetimeIndex with these indexers "
r"\[{key}\] of type float"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0]
class TestSeriesGetitemListLike:
@pytest.mark.parametrize("box", [list, np.array, Index, pd.Series])
def test_getitem_no_matches(self, box):
# GH#33462 we expect the same behavior for list/ndarray/Index/Series
ser = Series(["A", "B"])
key = Series(["C"], dtype=object)
key = box(key)
msg = r"None of \[Index\(\['C'\], dtype='object'\)\] are in the \[index\]"
with pytest.raises(KeyError, match=msg):
ser[key]
def test_getitem_intlist_intindex_periodvalues(self):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
result = ser[[2, 4]]
exp = Series(
[pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")],
index=[2, 4],
dtype="Period[D]",
)
tm.assert_series_equal(result, exp)
assert result.dtype == "Period[D]"
@pytest.mark.parametrize("box", [list, np.array, Index])
def test_getitem_intlist_intervalindex_non_int(self, box):
# GH#33404 fall back to positional since ints are unambiguous
dti = date_range("2000-01-03", periods=3)._with_freq(None)
ii = pd.IntervalIndex.from_breaks(dti)
ser = Series(range(len(ii)), index=ii)
expected = ser.iloc[:1]
key = box([0])
result = ser[key]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("box", [list, np.array, Index])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, np.uint64])
def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):
# GH#33404 do _not_ fall back to positional since ints are ambiguous
idx = Index(range(4)).astype(dtype)
dti = date_range("2000-01-03", periods=3)
mi = pd.MultiIndex.from_product([idx, dti])
ser = Series(range(len(mi))[::-1], index=mi)
key = box([5])
with pytest.raises(KeyError, match="5"):
ser[key]
def test_getitem_uint_array_key(self, uint_dtype):
# GH #37218
ser = Series([1, 2, 3])
key = np.array([4], dtype=uint_dtype)
with pytest.raises(KeyError, match="4"):
ser[key]
with pytest.raises(KeyError, match="4"):
ser.loc[key]
class TestGetitemBooleanMask:
def test_getitem_boolean(self, string_series):
ser = string_series
mask = ser > ser.median()
# passing list is OK
result = ser[list(mask)]
expected = ser[mask]
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.index, ser.index[mask])
def test_getitem_boolean_empty(self):
ser = Series([], dtype=np.int64)
ser.index.name = "index_name"
ser = ser[ser.isna()]
assert ser.index.name == "index_name"
assert ser.dtype == np.int64
# GH#5877
# indexing with empty series
ser = Series(["A", "B"])
expected = Series(dtype=object, index=Index([], dtype="int64"))
result = ser[Series([], dtype=object)]
tm.assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
msg = (
r"Unalignable boolean Series provided as indexer \(index of "
r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ser[Series([], dtype=bool)]
with pytest.raises(IndexingError, match=msg):
ser[Series([True], dtype=bool)]
def test_getitem_boolean_object(self, string_series):
# using column from DataFrame
ser = string_series
mask = ser > ser.median()
omask = mask.astype(object)
# getitem
result = ser[omask]
expected = ser[mask]
tm.assert_series_equal(result, expected)
# setitem
s2 = ser.copy()
cop = ser.copy()
cop[omask] = 5
s2[mask] = 5
tm.assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
msg = "Cannot mask with non-boolean array containing NA / NaN values"
with pytest.raises(ValueError, match=msg):
ser[omask]
with pytest.raises(ValueError, match=msg):
ser[omask] = 5
def test_getitem_boolean_dt64_copies(self):
# GH#36210
dti = date_range("2016-01-01", periods=4, tz="US/Pacific")
key = np.array([True, True, False, False])
ser = Series(dti._data)
res = ser[key]
assert res._values._data.base is None
# compare with numeric case for reference
ser2 = Series(range(4))
res2 = ser2[key]
assert res2._values.base is None
def test_getitem_boolean_corner(self, datetime_series):
ts = datetime_series
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
msg = (
r"Unalignable boolean Series provided as indexer \(index of "
r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted]
with pytest.raises(IndexingError, match=msg):
ts.loc[mask_shifted]
def test_getitem_boolean_different_order(self, string_series):
ordered = string_series.sort_values()
sel = string_series[ordered > 0]
exp = string_series[string_series > 0]
tm.assert_series_equal(sel, exp)
def test_getitem_boolean_contiguous_preserve_freq(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq == rng.freq
tm.assert_index_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
class TestGetitemCallable:
def test_getitem_callable(self):
# GH#12533
ser = Series(4, index=list("ABCD"))
result = ser[lambda x: "A"]
assert result == ser.loc["A"]
result = ser[lambda x: ["A", "B"]]
expected = ser.loc[["A", "B"]]
tm.assert_series_equal(result, expected)
result = ser[lambda x: [True, False, True, True]]
expected = ser.iloc[[0, 2, 3]]
tm.assert_series_equal(result, expected)
def test_getitem_generator(string_series):
gen = (x > 0 for x in string_series)
result = string_series[gen]
result2 = string_series[iter(string_series > 0)]
expected = string_series[string_series > 0]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize(
"series",
[
Series([0, 1]),
Series(date_range("2012-01-01", periods=2)),
Series(date_range("2012-01-01", periods=2, tz="CET")),
],
)
def test_getitem_ndim_deprecated(series):
with tm.assert_produces_warning(
FutureWarning, match="Support for multi-dimensional indexing"
):
result = series[:, None]
expected = np.asarray(series)[:, None]
tm.assert_numpy_array_equal(result, expected)
def test_getitem_multilevel_scalar_slice_not_implemented(
multiindex_year_month_day_dataframe_random_data,
):
# not implementing this for now
df = multiindex_year_month_day_dataframe_random_data
ser = df["A"]
msg = r"\(2000, slice\(3, 4, None\)\)"
with pytest.raises(TypeError, match=msg):
ser[2000, 3:4]
def test_getitem_dataframe_raises():
rng = list(range(10))
ser = Series(10, index=rng)
df = DataFrame(rng, index=rng)
msg = (
"Indexing a Series with DataFrame is not supported, "
"use the appropriate DataFrame column"
)
with pytest.raises(TypeError, match=msg):
ser[df > 5]
def test_getitem_assignment_series_aligment():
# https://github.com/pandas-dev/pandas/issues/37427
# with getitem, when assigning with a Series, it is not first aligned
ser = Series(range(10))
idx = np.array([2, 4, 9])
ser[idx] = Series([10, 11, 12])
expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])
tm.assert_series_equal(ser, expected)
def test_getitem_duplicate_index_mistyped_key_raises_keyerror():
# GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError
ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
with pytest.raises(KeyError, match="None"):
ser[None]
with pytest.raises(KeyError, match="None"):
ser.index.get_loc(None)
with pytest.raises(KeyError, match="None"):
ser.index._engine.get_loc(None)
def test_getitem_1tuple_slice_without_multiindex():
ser = Series(range(5))
key = (slice(3),)
result = ser[key]
expected = ser[key[0]]
tm.assert_series_equal(result, expected)
def test_getitem_preserve_name(datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[[0, 2, 4]]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
| bsd-3-clause |
JT5D/scikit-learn | sklearn/mixture/tests/test_gmm.py | 9 | 12379 | import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
"""
test a slow and naive implementation of lmvnpdf and
compare it to the vectorized version (mixture.lmvnpdf) to test
for correctness
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
whitergh/brainx | brainx/util.py | 2 | 47289 | """Generic utilities that may be needed by the other modules.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import warnings
import numpy as np
import networkx as nx
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def dictset_to_listset(dict_set):
""" converts a dict of sets to a list of sets
for converting partition.community objects"""
if isinstance(dict_set, dict) \
and _contains_only(dict_set, set):
return list(dict_set.values())
raise ValueError('{0} is not a dict of sets'.format(dict_set))
def listset_to_dictset(list_set):
""" converts a list of sets to a dict of sets
for converting partition.community objects"""
## check input is dict of sets
if isinstance(list_set, list) and \
_contains_only(list_set, set):
return {val: value for val, value in enumerate(list_set)}
raise ValueError('{0} is not a list of sets'.format(list_set))
def _no_repeats_in_listlist(list_list):
""" checks for duplicates in list of lists
returns True or False"""
if isinstance(list_list, list) and \
_contains_only(list_list, list):
allitems = [item for sublist in list_list for item in sublist]
return len(allitems) == len(set(allitems))
raise ValueError('{0} is not a list of lists'.format(list_list))
def _contains_only(container, type):
"""check that contents of a container are all of the same type"""
try:
container = container.values() # dict
except AttributeError:
pass
return all(isinstance(s, type) for s in container)
def listlist_to_listset(list_list):
""" converts list of lists to a list of sets (with check)
for converting partition.community objects"""
if _no_repeats_in_listlist(list_list):
return [set(x) for x in list_list]
else:
raise ValueError('found duplicate(s) in {0}, cannot validly format to '\
'list of sets'.format(list_list))
def slice_data(data, sub, block, subcond=None):
""" pull symmetric matrix from data block (4D or 5D)
Parameters
----------
data : numpy array
4D array (block, sub, nnode, nnode)
5D array (subcond, block, sub, nnode, nnode)
sub : int
int representing subject to index in data
block : int
int representing block to index in data
subcond : int
int representing optional subcondition from 5D array
Returns
-------
adjacency_matrix : numpy array
        symmetric numpy array (nnode, nnode)
"""
if subcond is None:
return data[block, sub]
return data[subcond, block, sub]
def format_matrix(data, s, b, lk, co, idc=[], costlist=[],
nouptri=False, asbool=True):
""" Function which thresholds the adjacency matrix for a particular
subject and particular block, using lookuptable to find thresholds,
cost value to find threshold, costlist
(thresholds, upper-tris it) so that we can use it with simulated annealing
Parameters
-----------
data : full data array 4D (block, sub, node, node)
s : int
subject
b : int
block
lk : numpy array
lookup table for study
co : int
cost value to threshold at
idc : int
index of ideal cost
costlist : list
list (size num_edges) with ordered values used to find
threshold to control number of edges
nouptri : bool
if False only keeps upper tri, True yields symmetric matrix
asbool : bool
        if True return boolean mask, otherwise return thresholded
weight matrix
"""
cmat = slice_data(data, s, b)
th = cost2thresh(co,s,b,lk,idc,costlist) #get the right threshold
cmat = thresholded_arr(cmat,th,fill_val=0)
if not nouptri:
cmat = np.triu(cmat,1)
if asbool:
return ~(cmat == 0)
return cmat
def format_matrix2(data, s, sc, c, lk, co, idc=[],
costlist=[], nouptri=False, asbool=True):
""" Function which formats matrix for a particular subject and
particular block (thresholds, upper-tris it) so that we can
make a graph object out of it
Parameters
----------
data : numpy array
full data array 5D (subcondition, condition, subject, node, node)
s : int
index of subject
sc : int
index of sub condition
c : int
index of condition
lk : numpy array
lookup table for thresholds at each possible cost
co : float
cost value to threshold at
idc : float
ideal cost
costlist : list
list of possible costs
nouptri : bool
False zeros out diag and below, True returns symmetric matrix
asbool : bool
        If True returns boolean mask, otherwise returns thresholded
        weighted matrix
"""
cmat = slice_data(data, s, c, sc)
th = cost2thresh2(co,s,sc,c,lk,[],idc,costlist) #get the right threshold
cmat = thresholded_arr(cmat,th,fill_val=0)
if not nouptri:
cmat = np.triu(cmat,1)
if asbool:
# return boolean mask
return ~(cmat == 0)
return cmat
def format_matrix3(data, s, c, b, lk, co, idc=[],
costlist=[], nouptri=False, asbool=True):
""" Function which formats matrix for a particular subject and
particular block (thresholds, upper-tris it) so that we can
make a graph object out of it
Parameters
----------
data : numpy array
full data array 5D (subcondition, condition, subject, node, node)
s : int
index of subject
c : int
index of condition
b : int
index of block
lk : numpy array
lookup table for thresholds at each possible cost
co : float
cost value to threshold at
idc : float
ideal cost
costlist : list
list of possible costs
nouptri : bool
False zeros out diag and below, True returns symmetric matrix
asbool : bool
        If True returns boolean mask, otherwise returns thresholded
        weighted matrix
"""
cmat = slice_data(data, s, b, c)
th = cost2thresh2(co,s,c,b,lk,[],idc,costlist) #get the right threshold
cmat = thresholded_arr(cmat,th,fill_val=0)
if not nouptri:
cmat = np.triu(cmat,1)
if asbool:
# return boolean mask
return ~(cmat == 0)
return cmat
def threshold_adjacency_matrix(adj_matrix, cost, uptri=False, return_thresh = False):
"""threshold adj_matrix at cost
Parameters
----------
adj_matrix : numpy array
graph adjacency matrix
cost : float
user specified cost
uptri : bool
False returns symmetric matrix, True zeros out diagonal and below
return_thresh: bool
False returns thresholded correlation matrix and expected cost, True also returns the threshold value
Returns
-------
thresholded : array of bools
binary matrix thresholded to result in cost
expected_cost : float
the real cost value (closest to cost)
thresh (optional): float
the real threshold value used to result in cost
"""
nnodes, _ = adj_matrix.shape
ind = np.triu_indices(nnodes, 1)
nedges = adj_matrix[ind].shape[0]
lookup = make_cost_thresh_lookup(adj_matrix)
    cost_index = int(np.round(cost * float(nedges)))  # index into lookup must be an int
thresh, expected_cost, round_cost = lookup[cost_index]
adj_matrix = adj_matrix > thresh #threshold matrix
np.fill_diagonal(adj_matrix, 0) #zero out diagonal
if uptri: #also zero out below diagonal
adj_matrix = np.triu(adj_matrix)
if return_thresh: # also return threshold value
return adj_matrix, expected_cost, thresh
else:
return adj_matrix, expected_cost
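# --- Illustrative usage sketch (added by the editor; not part of the original
# module). It assumes numpy is imported as np and uses symm_rand_arr and
# find_true_cost defined elsewhere in this module.
def _demo_threshold_adjacency_matrix():
    """Threshold a random symmetric matrix at a 10% cost and report the
    cost that was actually achieved (sketch only)."""
    example_adj = symm_rand_arr(20, fill_diag=0)
    mask, expected_cost = threshold_adjacency_matrix(example_adj, cost=0.10)
    # find_true_cost recomputes the achieved edge density from the boolean mask
    return expected_cost, find_true_cost(mask)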
def find_true_cost(boolean_matrix):
""" when passed a boolean matrix, presumably from thresholding to
achieve a specific cost, this calculates the actual cost for
this thresholded array"""
ind = np.triu_indices_from( boolean_matrix, 1)
alledges = np.array(boolean_matrix)[ind].shape[0]
found_edges = boolean_matrix[ind].sum()
return float(found_edges) / alledges
def all_positive(adjacency_matrix):
""" checks if edge values in adjacency matrix are all positive
or positive and negative
Returns
-------
all_positive : bool
True if all values are >=0
False if values < 0
"""
    # flatten, then shift signs by 1 so -1 -> 0 (False) and 0/+1 -> nonzero
    signs = set(np.sign(adjacency_matrix).ravel() + 1)
    return bool(sorted(signs)[0])
def make_cost_thresh_lookup(adjacency_matrix):
"""takes upper triangular (offset 1, no diagonal) of summetric
adjacency matrix, sorts (lowest -> highest)
Returns
-------
lookup : numpy record array
shape = number of edges
'weight' is sorted weight values (largest -> smallest)
'actual_cost' is cost at each weight (smallest -> largest)
'cost' is 'actual_costs' rounded to two decimal points
Example
-------
lookup = make_cost_thresh_lookup(adj_mat)
lookup[100]
(0.3010111736597483, 0.704225352112676, 0.7)
lookup[100].weight
0.3010111736597483
lookup[100].actual_cost
0.704225352112676
lookup[100].cost
0.70
"""
## check for nan in matrix, sorting will behave badly if nan found
if np.any(np.isnan(adjacency_matrix)):
raise ValueError('NAN found in adjacency matrix, this will cause'\
'improper behavior in sorting and improper results, '\
'please remove all nan ')
ind = np.triu_indices_from(adjacency_matrix, k = 1)
edges = adjacency_matrix[ind]
nedges = edges.shape[0]
lookup = np.recarray((nedges), dtype = [('weight', float),
('actual_cost', float),
('cost', float)])
lookup['weight'] = sorted(edges, reverse = True)
lookup['actual_cost'] = np.arange(nedges) / float(nedges)
lookup['cost'] = np.round(lookup['actual_cost'], decimals = 2)
return lookup
def cost_size(nnodes):
"""create a list of actual costs, tot_edges, edges_short
given a fixed number of nodes"""
warnings.warn('this is no longer used: use make_cost_array')
tot_edges = 0.5 * nnodes * (nnodes - 1)
costs = np.array(range(int(tot_edges) + 1), dtype=float) / tot_edges
edges_short = tot_edges / 2
return costs, tot_edges, edges_short
def make_cost_array(n_nodes, cost=0.5):
"""Make cost array of length cost * (the number of possible edges).
Parameters
----------
n_nodes: integer
Number of nodes in the graph.
cost: float, optional
Value between 0 and 1 (0.5 by default). The length of
cost_array will be set to cost * tot_edges.
Returns
-------
cost_array: numpy array
N+1-length array of costs, with N the number of possible
undirected edges in the graph. The costs range from 0 to 1 and
are equally-spaced.
tot_edges: float
Number of possible undirected edges in the graph.
Notes
-----
This is an edited version of the former function cost_size.
"""
tot_edges = 0.5 * n_nodes * (n_nodes - 1)
costs = np.array(range(int(tot_edges * cost)), dtype=float) / tot_edges
return costs, tot_edges
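# Illustrative sketch (added; not part of the original module): build a
# half-range cost array for a 10-node graph.
def _demo_make_cost_array():
    costs, tot_edges = make_cost_array(10, cost=0.5)
    # 10 nodes -> 45 possible undirected edges, so costs has 22 entries here
    return costs, tot_edges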
def metrics_to_pandas():
"""docstring for metrics_to_pandas"""
pass
def store_metrics(b, s, co, metd, arr):
"""Store a set of metrics into a structured array
b = block
s = subject
co = cost? float
metd = dict of metrics
arr : array?"""
if arr.ndim == 3:
idx = b,s,co
elif arr.ndim == 4:
idx = b,s,co,slice(None)
else:
raise ValueError("only know how to handle 3 or 4-d arrays")
for met_name, met_val in metd.items():
arr[idx][met_name] = met_val
def store_metrics2(c, b, s, co, metd, arr):
"""Store a set of metrics into a structured array
c = condition
b = block
s = subject
co = cost? float
metd = dict of metrics
arr : array?"""
if arr.ndim == 4:
idx = c,b,s,co
elif arr.ndim == 5:
idx = c,b,s,co,slice(None)
else:
raise ValueError("only know how to handle 4 or 5-d arrays")
for met_name, met_val in metd.items():
arr[idx][met_name] = met_val
def regular_lattice(n,k):
"""Return a regular lattice graph with n nodes and k neighbor connections.
This graph consists of a ring with n nodes which then get connected to
their k (k-1 if k is odd) nearest neighbors.
This type of graph is the starting point for the Watts-Strogatz small-world
model, where connections are then rewired in a second phase.
XXX TODO Use as comparison, check networkx to see if its update worth redundancy
"""
# Code simplified from the networkx.watts_strogatz_graph one
G = nx.Graph()
G.name="regular_lattice(%s,%s)"%(n,k)
nodes = list(range(n)) # nodes are labeled 0 to n-1
# connect each node to k/2 neighbors
for j in range(1, k//2+1):
targets = nodes[j:] + nodes[:j] # first j nodes are now last in list
G.add_edges_from(zip(nodes,targets))
return G
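# Illustrative sketch (added; not part of the original module): build a small
# ring lattice and inspect its degrees. Assumes networkx is imported as nx.
def _demo_regular_lattice():
    G = regular_lattice(10, 4)
    # every node ends up wired to its 4 nearest ring neighbours
    degrees = [G.degree(n) for n in G.nodes()]
    return G, degrees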
def compile_data(input,tmslabel,mat_type,scale,data_type):
"""This function reads in data into a text file"""
filename='Mean_'+data_type+'_'+tmslabel+'_'+mat_type+scale+'.txt'
f=open(filename,'a')
for i in range(0,len(input)):
f.write('%s\t' %input[i])
f.write('\n')
f.close()
def arr_stat(x,ddof=1):
"""Return (mean,stderr) for the input array"""
m = x.mean()
std = x.std(ddof=ddof)
return m,std
def threshold_arr(cmat, threshold=0.0, threshold2=None):
"""Threshold values from the input matrix.
Parameters
----------
cmat : array_like
An array of numbers.
threshold : float, optional
If threshold2 is None, indices and values for elements of cmat
greater than this value (0 by default) will be returned. If
threshold2 is not None, indices and values for elements of cmat
less than this value (or greater than threshold2) will be
returned.
threshold2 : float, optional
Indices and values for elements of cmat greater than this value
(or less than threshold) will be returned. By default,
threshold2 is set to None and not used.
Returns
-------
A tuple of length N + 1, where N is the number of dimensions in
cmat. The first N elements of this tuple are arrays with indices in
cmat, for each dimension, corresponding to elements greater than
threshold (if threshold2 is None) or more extreme than the two
thresholds. The last element of the tuple is an array with the
values in cmat corresponding to these indices.
Examples
--------
>>> a = np.linspace(0, 0.8, 7)
>>> a
array([ 0. , 0.1333, 0.2667, 0.4 , 0.5333,
0.6667, 0.8 ])
>>> threshold_arr(a, 0.3)
(array([3, 4, 5, 6]),
array([ 0.4 , 0.5333, 0.6667, 0.8 ]))
With two thresholds:
>>> threshold_arr(a, 0.3, 0.6)
(array([0, 1, 2, 5, 6]),
array([ 0. , 0.1333, 0.2667, 0.6667, 0.8 ]))
"""
# Select thresholds.
if threshold2 is None:
th_low = -np.inf
th_hi = threshold
else:
th_low = threshold
th_hi = threshold2
# Mask out the values we are actually going to use.
idx = np.where((cmat < th_low) | (cmat > th_hi))
vals = cmat[idx]
return idx + (vals,)
def thresholded_arr(arr, threshold=0.0, threshold2=None, fill_val=np.nan):
"""Threshold values from the input matrix and return a new matrix.
Parameters
----------
arr : array_like
An array of numbers.
threshold : float, optional
If threshold2 is None, elements of arr less than this value (0
by default) will be filled with fill_val. If threshold2 is not
None, elements of arr greater than this value but less than
threshold2 will be filled with fill_val.
threshold2 : float, optional
Elements of arr less than this value but greater than threshold
will be filled with fill_val. By default, high_thresh is set to
None and not used.
fill_val : float or numpy.nan, optional
Value (np.nan by default) with which to fill elements below
threshold or between threshold and threshold2.
Returns
-------
a2 : array_like
An array with the same shape as arr, but with values below
threshold or between threshold and threshold2 replaced with
fill_val.
Notes
-----
arr itself is not altered.
"""
a2 = np.empty_like(arr)
a2.fill(fill_val)
mth = threshold_arr(arr, threshold, threshold2)
idx,vals = mth[:-1], mth[-1]
a2[idx] = vals
return a2
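# Illustrative sketch (added; not part of the original module): zero out weak
# values of a toy array with thresholded_arr.
def _demo_thresholded_arr():
    a = np.linspace(0, 0.8, 7)
    # keep only values strictly above 0.3 and fill the rest with 0
    return thresholded_arr(a, threshold=0.3, fill_val=0)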
def normalize(arr,mode='direct',folding_edges=None):
"""Normalize an array to [0,1] range.
By default, this simply rescales the input array to [0,1]. But it has a
    special 'folding' mode that normalizes the absolute value of all values;
    in addition, values between the folding_edges (low_cutoff, high_cutoff)
    are zeroed out.
Parameters
----------
arr : 1d array
assumes dtype == float, if int32, will raise ValueError
mode : string, one of ['direct','folding']
if direct rescale all values (pos and neg) between 0,1
if folding, zeros out values between folding_values (inclusive)
and normalizes absolute value of remaining values
folding_edges : (float,float)
(low_cutoff, high_cutoff) lower and upper values to zero out
(values are inclusive)
Only needed for folding mode, ignored in 'direct' mode.
Examples
--------
>>> np.set_printoptions(precision=4) # for doctesting
>>> a = np.linspace(0.3,0.8,7)
>>> normalize(a)
array([ 0. , 0.1667, 0.3333, 0.5 , 0.6667, 0.8333, 1. ])
>>>
>>> b = np.concatenate([np.linspace(-0.7,-0.3,4),
... np.linspace(0.3,0.8,4)] )
>>> b
array([-0.7 , -0.5667, -0.4333, -0.3 , 0.3 , 0.4667, 0.6333, 0.8 ])
>>> normalize(b,'folding',[-0.3,0.3])
array([ 0.8 , 0.5333, 0.2667, 0. , 0. , 0.3333, 0.6667, 1. ])
>>>
>>>
>>> c = np.concatenate([np.linspace(-0.8,-0.3,4),
... np.linspace(0.3,0.7,4)] )
>>> c
array([-0.8 , -0.6333, -0.4667, -0.3 , 0.3 , 0.4333, 0.5667, 0.7 ])
>>> normalize(c,'folding',[-0.3,0.3])
array([ 1. , 0.7917, 0.5833, 0. , 0. , 0.5417, 0.7083, 0.875 ])
"""
if mode == 'direct':
return rescale_arr(arr,0,1)
elif mode == 'folding':
# cast folding_edges to floats in case inputs are ints
low_cutoff, high_cutoff = [float(x) for x in folding_edges]
amin, amax = arr.min(), arr.max()
low_diff, high_diff = low_cutoff-amin, amax-high_cutoff
if low_diff < 0 or high_diff < 0:
raise ValueError("folding edges must be within array range")
mask = np.logical_and( arr >= low_cutoff, arr <= high_cutoff)
out = arr.copy()
out[mask] = 0
return rescale_arr(np.abs(out), 0, 1)
else:
        raise ValueError('Unknown mode %s: valid options are "direct", "folding"' % mode)
def mat2graph(cmat,threshold=0.0,threshold2=None):
"""Make a weighted graph object out of an adjacency matrix.
The values in the original matrix cmat can be thresholded out. If only one
threshold is given, all values below that are omitted when creating edges.
If two thresholds are given, then values in the th2-th1 range are
ommitted. This allows for the easy creation of weighted graphs with
positive and negative values where a range of weights around 0 is omitted.
Parameters
----------
cmat : 2-d square array
Adjacency matrix.
threshold : float
First threshold.
threshold2 : float
Second threshold.
Returns
-------
G : a NetworkX weighted graph object, to which a dictionary called
G.metadata is appended. This dict contains the original adjacency matrix
cmat, the two thresholds, and the weights
"""
# Input sanity check
nrow,ncol = cmat.shape
if nrow != ncol:
raise ValueError("Adjacency matrix must be square")
row_idx, col_idx, vals = threshold_arr(cmat,threshold,threshold2)
# Also make the full thresholded array available in the metadata
cmat_th = np.empty_like(cmat)
if threshold2 is None:
cmat_th.fill(threshold)
else:
cmat_th.fill(-np.inf)
cmat_th[row_idx,col_idx] = vals
# Next, make a normalized copy of the values. For the 2-threshold case, we
# use 'folding' normalization
if threshold2 is None:
vals_norm = normalize(vals)
else:
vals_norm = normalize(vals,'folding',[threshold,threshold2])
# Now make the actual graph
G = nx.Graph(weighted=True)
G.add_nodes_from(range(nrow))
# To keep the weights of the graph to simple values, we store the
# normalize ones in a separate dict that we'll stuff into the graph
# metadata.
normed_values = {}
for i,j,val,nval in zip(row_idx,col_idx,vals,vals_norm):
if i == j:
# no self-loops
continue
G.add_edge(i,j,weight=val)
normed_values[i,j] = nval
# Write a metadata dict into the graph and save the threshold info there
G.metadata = dict(threshold1=threshold,
threshold2=threshold2,
cmat_raw=cmat,
cmat_th =cmat_th,
vals_norm = normed_values,
)
return G
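# Illustrative sketch (added; not part of the original module): build a small
# weighted graph from a hand-made correlation matrix, keeping values > 0.5.
def _demo_mat2graph():
    cmat = np.array([[0.0, 0.8, 0.2],
                     [0.8, 0.0, 0.6],
                     [0.2, 0.6, 0.0]])
    G = mat2graph(cmat, threshold=0.5)
    # the metadata dict keeps the raw and thresholded matrices plus the
    # normalized edge weights
    return G, G.metadata['cmat_th']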
# Backwards compatibility name
mkgraph = mat2graph
def mkdigraph(cmat,dmat,threshold=0.0,threshold2=None):
"""Make a graph object out of an adjacency matrix and direction matrix"""
# Input sanity check
nrow,ncol = cmat.shape
if not nrow==ncol:
raise ValueError("Adjacency matrix must be square")
row_idx, col_idx, vals = threshold_arr(cmat,threshold,threshold2)
# Now make the actual graph
G = nx.DiGraph()
G.add_nodes_from(range(nrow))
for i,j,val in zip(row_idx,col_idx,vals):
if dmat[i,j] > 0:
            G.add_edge(i,j,weight=val)
        else:
            G.add_edge(j,i,weight=val)
return G
def rescale_arr(arr,amin,amax):
"""Rescale an array to a new range.
Return a new array whose range of values is (amin,amax).
Parameters
----------
arr : array-like
amin : float
new minimum value
amax : float
new maximum value
Examples
--------
>>> a = np.arange(5)
>>> rescale_arr(a,3,6)
array([ 3. , 3.75, 4.5 , 5.25, 6. ])
"""
# old bounds
m = arr.min()
M = arr.max()
# scale/offset
s = float(amax-amin)/(M-m)
d = amin - s*m
# Apply clip before returning to cut off possible overflows outside the
# intended range due to roundoff error, so that we can absolutely guarantee
# that on output, there are no values > amax or < amin.
return np.clip(s*arr+d,amin,amax)
# backwards compatibility only, deprecated
def replace_diag(arr,val=0):
fill_diagonal(arr,val)
return arr
def cost2thresh(cost, sub, bl, lk, idc=[], costlist=[]):
"""Return the threshold associated with a particular cost.
The cost is assessed with regard to block 'bl' and subject 'sub'.
Parameters
----------
cost: float
Cost value for which the associated threshold will be returned.
sub: integer
Subject number.
bl: integer
Block number.
lk: numpy array
Lookup table with blocks X subjects X 2 (threshold or cost, in
that order) X thresholds/costs. Each threshold is a value
representing the lowest correlation value accepted. They are
ordered from least to greatest. Each cost is the fraction of
all possible edges that exists in an undirected graph made from
this block's correlations (thresholded with the corresponding
threshold).
idc: integer or empty list, optional
Index in costlist corresponding to cost currently being
processed. By default, idc is an empty list.
costlist: array_like
List of costs that are being queried with the current function
in order.
Returns
-------
th: float
Threshold value in lk corresponding to the supplied cost. If
multiple entries matching cost exist, the smallest threshold
corresponding to these is returned. If no entries matching cost
are found, return the threshold corresponding to the previous
cost in costlist.
Notes
-----
The supplied cost must exactly match an entry in lk for a match to
be registered.
"""
return cost2thresh2(cost, sub, bl, axis0=None,
lk=lk, last = None, idc=idc,costlist = costlist)
def cost2thresh2(cost, sub, axis1, axis0, lk,
last = None, idc = [], costlist=[]):
"""A definition for loading the lookup table and finding the threshold
associated with a particular cost for a particular subject in a
particular block of data
Inputs
------
cost : float
cost value for which we need the associated threshold
sub : int
(axis -2) subject number
axis1 : int
axis 1 into lookup (eg block number or condition)
axis0 : int
axis 0 into lookup (eg subcondition)
lk : numpy array
lookup table (axis0 x axis1 x subject x 2 )
last : None
NOT USED last threshold value
idc : int or empty list
Index in costlist corresponding to cost currently being
processed. By default, idc is an empty list.
costlist : array-like
List of costs that are being queried with the current function
in order.
Returns
-------
threshold : float
threshold value for this cost"""
subject_lookup = slice_data(lk, sub, axis1, subcond=axis0)
index = np.where(subject_lookup[1] == cost)
threshold = subject_lookup[0][index]
if len(threshold) > 1:
threshold = threshold[0]
#if there are multiple thresholds, go down to the lower cost
####Is this right?!!!####
print('Subject %s has multiple thresholds at cost %s'%(sub, cost))
print('index 1: %s, index 2: %s'%(axis1, axis0))
elif len(threshold) < 1:
idc = idc-1
newcost = costlist[idc]
threshold = cost2thresh2(newcost, sub, axis1, axis0, lk,
idc=idc, costlist = costlist)
print(' '.join(['Subject %s does not have cost at %s'%(sub, cost),
'index 1: %s, index 2: %s'%(axis1, axis0),
'nearest cost %s being used'%(newcost)]))
else:
threshold = threshold[0]
return threshold
def apply_cost(corr_mat, cost, tot_edges):
"""Threshold corr_mat to achieve cost.
Return the thresholded matrix and the threshold value. In the
thresholded matrix, the main diagonal and upper triangle are set to
0, so information is held only in the lower triangle.
Parameters
----------
corr_mat: array_like
Square matrix with ROI-to-ROI correlations.
cost: float
Fraction of all possible undirected edges desired in the
thresholded matrix.
tot_edges: integer
The number of possible undirected edges in a graph with the
number of nodes in corr_mat.
Returns
-------
thresholded_mat: array_like
Square matrix with correlations below threshold set to 0,
making the fraction of matrix elements that are non-zero equal
to cost. In addition, the main diagonal and upper triangle are
set to 0.
threshold: float
Correlations below this value have been set to 0 in
thresholded_corr_mat.
Notes
-----
If not all correlations are unique, it is possible that there will
be no way to achieve the cost without, e.g., arbitrarily removing
one of two identical correlations while keeping the other. Instead
of making such an arbitrary choice, this function retains all
identical correlations equal to or greater than threshold, even if
this means cost is not exactly achieved.
"""
thresholded_mat = np.tril(corr_mat, -1)
    n_nonzero = int(round(cost * tot_edges))  # number of retained edges must be an int
elements = thresholded_mat.ravel()
threshold = elements[elements.argsort()[-n_nonzero]]
thresholded_mat[thresholded_mat < threshold] = 0
return thresholded_mat, threshold
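# Illustrative sketch (added; not part of the original module): threshold a
# random correlation matrix to a 20% cost with apply_cost.
def _demo_apply_cost():
    corr = symm_rand_arr(10, fill_diag=0)
    _, tot_edges = make_cost_array(10)
    thresholded, thresh = apply_cost(corr, 0.2, tot_edges)
    return thresholded, thresh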
def network_ind(ntwk_type,n_nodes):
"""Reads in a network type, number of nodes total and returns the indices of that network"""
net_core ="dACC L_aIfO R_aIfO L_aPFC R_aPFC L_aThal R_aThal".split()
net_fp = """L_frontcx R_frontcx L_IPL R_IPL L_IPS R_IPS L_PFC
R_PFC L_precuneus R_precuneus midcing""".split()
net_motor = """L_motor R_motor L_preSMA R_preSMA SMA""".split()
net_aal = " "
subnets = { 'g': net_core,
'b': net_fp,
'y': net_motor,
}
ALL_LABELS = net_core+net_fp +net_motor
if ntwk_type=='core':
roi_ind=range(0,7)
subnets = { 'g': net_core}
ALL_LABELS = net_core
elif ntwk_type=='FP':
roi_ind=range(7,18)
subnets = {'b': net_fp}
ALL_LABELS = net_fp
elif ntwk_type=='all':
roi_ind=range(0,n_nodes)
subnets = { 'g': net_core,
'b': net_fp }#,
#'y': net_motor,
#}
ALL_LABELS = net_core+net_fp# +net_motor
elif ntwk_type=='aal':
roi_ind=range(0,n_nodes)
subnets = {'k': net_aal}
ALL_LABELS = net_aal
    else:
        raise ValueError('Unrecognized network type: %s' % ntwk_type)
return roi_ind,subnets,ALL_LABELS
#-----------------------------------------------------------------------------
# Numpy utilities - Note: these have been sent into numpy itself, so eventually
# we'll be able to get rid of them here.
#-----------------------------------------------------------------------------
def fill_diagonal(a,val):
"""Fill the main diagonal of the given array of any dimensionality.
For an array with ndim > 2, the diagonal is the list of locations with
indices a[i,i,...,i], all identical.
This function modifies the input array in-place, it does not return a
value.
This functionality can be obtained via diag_indices(), but internally this
version uses a much faster implementation that never constructs the indices
and uses simple slicing.
Parameters
----------
a : array, at least 2-dimensional.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
Examples
--------
>>> a = np.zeros((3,3),int)
>>> fill_diagonal(a,5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-d array:
>>> a = np.zeros((3,3,3,3),int)
>>> fill_diagonal(a,4)
We only show a few blocks for clarity:
>>> a[0,0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1,1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2,2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
See also
--------
- numpy.diag_indices: indices to access diagonals given shape information.
- numpy.diag_indices_from: indices to access diagonals given an array.
"""
return np.fill_diagonal(a,val)
def diag_indices(n,ndim=2):
"""Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array with ndim (>=2) dimensions and shape (n,n,...,n). For
ndim=2 this is the usual diagonal, for ndim>2 this is the set of indices
to access A[i,i,...,i] for i=[0..n-1].
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions
Examples
--------
Create a set of indices to access the diagonal of a (4,4) array:
>>> di = diag_indices(4)
>>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
>>> a
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]])
>>> a[di] = 100
>>> a
array([[100, 2, 3, 4],
[ 5, 100, 7, 8],
[ 9, 10, 100, 12],
[ 13, 14, 15, 100]])
Now, we create indices to manipulate a 3-d array:
>>> d3 = diag_indices(2,3)
And use it to set the diagonal of a zeros array to 1:
>>> a = np.zeros((2,2,2),int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
<BLANKLINE>
[[0, 0],
[0, 1]]])
See also
--------
- numpy.diag_indices_from: create the indices based on the shape of an existing
array.
"""
return np.diag_indices(n, ndim=ndim)
def diag_indices_from(arr):
"""Return the indices to access the main diagonal of an n-dimensional array.
See diag_indices() for full details.
Parameters
----------
arr : array, at least 2-d
"""
return np.diag_indices_from(arr)
def mask_indices(n,mask_func,k=0):
"""Return the indices to access (n,n) arrays, given a masking function.
Assume mask_func() is a function that, for a square array a of size (n,n)
with a possible offset argument k, when called as mask_func(a,k) returns a
new array with zeros in certain locations (functions like triu() or tril()
do precisely this). Then this function returns the indices where the
non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n,n).
mask_func : callable
A function whose api is similar to that of numpy.tri{u,l}. That is,
mask_func(x,k) returns a boolean array, shaped like x. k is an optional
argument to the function.
k : scalar
An optional argument which is passed through to mask_func(). Functions
like tri{u,l} take a second argument that is interpreted as an offset.
Returns
-------
indices : an n-tuple of index arrays.
The indices corresponding to the locations where mask_func(ones((n,n)),k)
is True.
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = mask_indices(3,np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3,3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
Then:
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = mask_indices(3,np.triu,1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = np.ones((n,n),int)
a = mask_func(m,k)
return np.where(a != 0)
def tril_indices(n,k=0):
"""Return the indices for the lower-triangle of an (n,n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see tril() for details).
Examples
--------
    Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = tril_indices(4)
>>> il2 = tril_indices(4,2)
Here is how they can be used with a sample array:
>>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
>>> a
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]])
Both for indexing:
>>> a[il1]
array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 2, 3, 4],
[-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 4],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
See also
--------
- triu_indices : similar function, for upper-triangular.
- mask_indices : generic function accepting an arbitrary mask function.
"""
return np.tril_indices(n,k) #mask_indices(n,np.tril,k)
def tril_indices_from(arr,k=0):
"""Return the indices for the lower-triangle of an (n,n) array.
See tril_indices() for full details.
Parameters
----------
    arr : ndarray
      The (n, n) array for which the returned indices will be valid.
k : int, optional
Diagonal offset (see tril() for details).
"""
return np.tril_indices_from(arr, k)
def triu_indices(n,k=0):
"""Return the indices for the upper-triangle of an (n,n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see triu() for details).
Examples
--------
    Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = triu_indices(4)
>>> iu2 = triu_indices(4,2)
Here is how they can be used with a sample array:
>>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
>>> a
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]])
Both for indexing:
>>> a[iu1]
array([ 1, 2, 3, 4, 6, 7, 8, 11, 12, 16])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 5, -1, -1, -1],
[ 9, 10, -1, -1],
[13, 14, 15, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 5, -1, -1, -10],
[ 9, 10, -1, -1],
[ 13, 14, 15, -1]])
See also
--------
- tril_indices : similar function, for lower-triangular.
- mask_indices : generic function accepting an arbitrary mask function.
"""
return np.triu_indices(n,k) #mask_indices(n,np.triu,k)
def triu_indices_from(arr,k=0):
"""Return the indices for the lower-triangle of an (n,n) array.
See triu_indices() for full details.
Parameters
----------
    arr : ndarray
      The (n, n) array for which the returned indices will be valid.
k : int, optional
Diagonal offset (see triu() for details).
"""
    return np.triu_indices_from(arr, k)
def structured_rand_arr(size, sample_func=np.random.random,
ltfac=None, utfac=None, fill_diag=None):
"""Make a structured random 2-d array of shape (size,size).
If no optional arguments are given, a symmetric array is returned.
Parameters
----------
size : int
Determines the shape of the output array: (size,size).
sample_func : function, optional.
Must be a function which when called with a 2-tuple of ints, returns a
2-d array of that shape. By default, np.random.random is used, but any
other sampling function can be used as long as it matches this API.
utfac : float, optional
Multiplicative factor for the upper triangular part of the matrix.
ltfac : float, optional
Multiplicative factor for the lower triangular part of the matrix.
fill_diag : float, optional
If given, use this value to fill in the diagonal. Otherwise the diagonal
will contain random elements.
Examples
--------
>>> np.random.seed(0) # for doctesting
>>> np.set_printoptions(precision=4) # for doctesting
>>> structured_rand_arr(4)
array([[ 0.5488, 0.7152, 0.6028, 0.5449],
[ 0.7152, 0.6459, 0.4376, 0.8918],
[ 0.6028, 0.4376, 0.7917, 0.5289],
[ 0.5449, 0.8918, 0.5289, 0.0871]])
>>> structured_rand_arr(4,ltfac=-10,utfac=10,fill_diag=0.5)
array([[ 0.5 , 8.3262, 7.7816, 8.7001],
[-8.3262, 0.5 , 4.6148, 7.8053],
[-7.7816, -4.6148, 0.5 , 9.4467],
[-8.7001, -7.8053, -9.4467, 0.5 ]])
"""
# Make a random array from the given sampling function
rmat = sample_func((size,size))
# And the empty one we'll then fill in to return
out = np.empty_like(rmat)
# Extract indices for upper-triangle, lower-triangle and diagonal
uidx = triu_indices(size,1)
lidx = tril_indices(size,-1)
didx = diag_indices(size)
# Extract each part from the original and copy it to the output, possibly
# applying multiplicative factors. We check the factors instead of
# defaulting to 1.0 to avoid unnecessary floating point multiplications
# which could be noticeable for very large sizes.
if utfac:
out[uidx] = utfac * rmat[uidx]
else:
out[uidx] = rmat[uidx]
if ltfac:
out[lidx] = ltfac * rmat.T[lidx]
else:
out[lidx] = rmat.T[lidx]
# If fill_diag was provided, use it; otherwise take the values in the
# diagonal from the original random array.
if fill_diag is not None:
out[didx] = fill_diag
else:
out[didx] = rmat[didx]
return out
def symm_rand_arr(size,sample_func=np.random.random,fill_diag=None):
"""Make a symmetric random 2-d array of shape (size,size).
Parameters
----------
size : int
Size of the output array.
sample_func : function, optional.
Must be a function which when called with a 2-tuple of ints, returns a
2-d array of that shape. By default, np.random.random is used, but any
other sampling function can be used as long as it matches this API.
fill_diag : float, optional
If given, use this value to fill in the diagonal.
Examples
--------
>>> np.random.seed(0) # for doctesting
>>> np.set_printoptions(precision=4) # for doctesting
>>> symm_rand_arr(4)
array([[ 0.5488, 0.7152, 0.6028, 0.5449],
[ 0.7152, 0.6459, 0.4376, 0.8918],
[ 0.6028, 0.4376, 0.7917, 0.5289],
[ 0.5449, 0.8918, 0.5289, 0.0871]])
>>> symm_rand_arr(4,fill_diag=4)
array([[ 4. , 0.8326, 0.7782, 0.87 ],
[ 0.8326, 4. , 0.4615, 0.7805],
[ 0.7782, 0.4615, 4. , 0.9447],
[ 0.87 , 0.7805, 0.9447, 4. ]])
"""
return structured_rand_arr(size,sample_func,fill_diag=fill_diag)
def antisymm_rand_arr(size,sample_func=np.random.random):
"""Make an anti-symmetric random 2-d array of shape (size,size).
Parameters
----------
n : int
Size of the output array.
sample_func : function, optional.
Must be a function which when called with a 2-tuple of ints, returns a
2-d array of that shape. By default, np.random.random is used, but any
        other sampling function can be used as long as it matches this API.
Examples
--------
>>> np.random.seed(0) # for doctesting
>>> np.set_printoptions(precision=4) # for doctesting
>>> antisymm_rand_arr(4)
array([[ 0. , 0.7152, 0.6028, 0.5449],
[-0.7152, 0. , 0.4376, 0.8918],
[-0.6028, -0.4376, 0. , 0.5289],
[-0.5449, -0.8918, -0.5289, 0. ]])
"""
return structured_rand_arr(size,sample_func,ltfac=-1.0,fill_diag=0)
def diag_stack(tup):
"""Stack arrays in sequence diagonally (block wise).
Take a sequence of arrays and stack them diagonally to make a single block
array.
Parameters
----------
tup : sequence of ndarrays
Tuple containing arrays to be stacked. The arrays must have the same
shape along all but the first two axes.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays together.
vsplit : Split array into a list of multiple sub-arrays vertically.
Examples
--------
"""
# Find number of rows and columns needed
shapes = np.array([a.shape for a in tup], int)
sums = shapes.sum(0)
nrow = sums[0]
ncol = sums[1]
out = np.zeros((nrow, ncol), tup[0].dtype)
row_offset = 0
col_offset = 0
for arr in tup:
nr, nc = arr.shape
row_end = row_offset+nr
col_end = col_offset+nc
out[row_offset:row_end, col_offset:col_end] = arr
row_offset, col_offset = row_end, col_end
return out
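# Illustrative sketch (added; not part of the original module): block-diagonal
# stacking of two small integer arrays with diag_stack.
def _demo_diag_stack():
    a = np.ones((2, 2), int)
    b = 2 * np.ones((3, 3), int)
    # result is 5x5: `a` fills the top-left block, `b` the bottom-right
    return diag_stack((a, b))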
def array_to_string(part):
    """Convert the values of a partition dict to strings.
    Mainly for use with the plot_partition function, which requires a dict of
    strings for node labels.
    """
    out_part = dict.fromkeys(part)
    for m in part:
        out_part[m] = str(part[m])
    return out_part
def compare_dicts(d1,d2):
"""Function that reads in two dictionaries of sets (i.e. a graph partition) and assess how similar they are.
Needs to be updated so that it can adjust this measure to include partitions that are pretty close."""
if len(d1)>len(d2):
longest_dict=len(d1)
else:
longest_dict=len(d2)
check=0
#loop through the keys in the first dict
for m1,val1 in d1.items():
#compare to the values in each key of the second dict
for m2,val2 in d2.items():
if val1 == val2:
check+=1
return float(check)/longest_dict
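# Illustrative sketch (added; not part of the original module): similarity of
# two toy partitions with compare_dicts.
def _demo_compare_dicts():
    part1 = {0: set([1, 2, 3]), 1: set([4, 5])}
    part2 = {0: set([1, 2, 3]), 1: set([4, 6])}
    # one of the two modules matches exactly, so the score is 0.5
    return compare_dicts(part1, part2)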
def assert_no_empty_modules(part):
"""Asserts that a partition contains no empty moudles.
This function raises a ValueError exception if the input partition has an
empty module.
Parameters
----------
part : dict
A dict describing a graph partition.
"""
for label, mod in part.items():
if len(mod)==0:
raise ValueError("Module %s in partition is empty" % label)
| bsd-3-clause |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py | 69 | 22336 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self.__angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self.__angle))
def __str__(self):
return str( (FontProperties.__str__(self), self.__angle))
def set_angle(self,angle):
self.__angle=angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style=%d" % style
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def draw_image(self, x, y, im, bbox):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: %d points" % len(str(x))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: (%f,%f)" % (x,y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
        points is a sequence of (x, y) tuples, each element
        giving the coordinates of a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: %d points" % len(points)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if ismath:
self.draw_math_text(gc,x,y,s,prop,angle)
else:
self.draw_plain_text(gc,x,y,s,prop,angle)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
with FontPropertry prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
pyemf.FW_NORMAL, 0, 0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle %d" % handle
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle %d" % handle
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name=%s" % fname
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperty prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
if ismath:
if debugText: print " MATH TEXT! = %s" % str(ismath)
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
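# --- Hedged usage sketch (added for illustration; not part of the original
# backend). It assumes pyemf is installed and that matplotlib can select this
# module as the 'emf' backend; the output file name below is arbitrary.
def _demo_save_emf(filename='example.emf'):
    import matplotlib
    matplotlib.use('emf')
    import matplotlib.pyplot as plt
    fig = plt.figure()
    fig.gca().plot([0, 1, 2], [0, 1, 4])
    # savefig dispatches to FigureCanvasEMF.print_emf for the .emf extension
    fig.savefig(filename, dpi=300)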
| agpl-3.0 |
mufid/berkilau | ws/CSUIBotClass2014/util/plotter.py | 1 | 1539 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
def plot(X, m, x_star, t):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim((m['left-wall'],m['right-wall']))
ax.set_xticks(np.linspace(m['left-wall'],m['right-wall'],11))
ax.set_ylim((0.0, 0.1))
ax.set_yticks(np.linspace(0.0,0.1,5))
ax.set_xlabel('robot position: x')
ax.set_ylabel('bel(x)')
fig.suptitle('Localization at t= ' + str(t))
coords = [[(x[0],0),(x[0],x[1])] for x in X]
line_segments = LineCollection(coords, linewidths = 1.5, colors = colorConverter.to_rgba('r'), linestyle = 'solid')
ax.add_collection(line_segments)
ax.annotate('The BOT', xy=(x_star, 0.0), xytext=(5.0, 0.050), arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center')
ax.annotate('left-door', xy=(m['left-door'], 0.0), xytext=(m['left-door'], 0.075), arrowprops=dict(facecolor='blue', shrink=0.05), horizontalalignment='center', verticalalignment='center')
ax.annotate('middle-door', xy=(m['middle-door'], 0.0), xytext=(m['middle-door'], 0.075), arrowprops=dict(facecolor='blue', shrink=0.05), horizontalalignment='center', verticalalignment='center')
ax.annotate('right-door', xy=(m['right-door'], 0.0), xytext=(m['right-door'], 0.075), arrowprops=dict(facecolor='blue', shrink=0.05), horizontalalignment='center', verticalalignment='center')
return fig
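# --- Hedged usage sketch (added for illustration; not part of the original
# file). The map keys are inferred from the annotate calls above; the belief
# values and positions are made up.
def _demo_plot():
    m = {'left-wall': 0.0, 'right-wall': 10.0,
         'left-door': 2.0, 'middle-door': 5.0, 'right-door': 8.0}
    # X is a sequence of (position, belief) pairs; x_star is the true pose
    X = [(x, 0.1 / 11) for x in np.linspace(0.0, 10.0, 11)]
    fig = plot(X, m, x_star=5.0, t=0)
    fig.savefig('belief_t0.png')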
| mit |
bmazin/ARCONS-pipeline | examples/Pal2013_throughput/testImageStack.py | 1 | 5561 | '''
Author: Julian van Eyken Date: May 31 2013
A bit of photon-list image stacking testing....
'''
import warnings
import pickle
import os.path
import glob
import scipy.stats
import numpy as np
#from astropy import coordinates as coord
import matplotlib.pyplot as mpl
import photonlist.photlist as pl
import photonlist.RADecImage as rdi
from util.FileName import FileName
from util import utils
def makeImageStack(fileNames='photons_*.h5', dir=os.getenv('MKID_PROC_PATH', default="/Scratch")+'/photonLists/20131209',
detImage=False, saveFileName='stackedImage.pkl', wvlMin=None,
wvlMax=None, doWeighted=True, medCombine=False, vPlateScale=0.2,
nPixRA=250,nPixDec=250):
'''
Create an image stack
INPUTS:
filenames - string, list of photon-list .h5 files. Can either
use wildcards (e.g. 'mydirectory/*.h5') or if string
starts with an @, supply a text file which contains
a list of file names to stack. (e.g.,
'mydirectory/@myfilelist.txt', where myfilelist.txt
is a simple text file with one file name per line.)
dir - to provide name of a directory in which to find the files
detImage - if True, show the images in detector x,y coordinates instead
of transforming to RA/dec space.
saveFileName - name of output pickle file for saving final resulting object.
doWeighted - boolean, if True, do the image flatfield weighting.
medCombine - experimental, if True, do a median combine of the image stack
instead of just adding them all.... Prob. should be implemented
properly at some point, just a fudge for now.
vPlateScale - (arcsec/virtual pixel) - to set the plate scale of the virtual
pixels in the outputs image.
nPixRA,nPixDec - size of virtual pixel grid in output image.
OUTPUTS:
Returns a stacked image object, saves the same out to a pickle file, and
(depending whether it's still set to or not) saves out the individual non-
stacked images as it goes.
'''
#Get the list of filenames
if fileNames[0]=='@':
#(Note, actually untested, but should be more or less right...)
files=[]
with open(fileNames[1:]) as f:
for line in f:
files.append(os.path.join(dir,line.strip()))
else:
files = glob.glob(os.path.join(dir, fileNames))
#Initialise empty image centered on Crab Pulsar
virtualImage = rdi.RADecImage(nPixRA=nPixRA,nPixDec=nPixDec,vPlateScale=vPlateScale,
cenRA=3.20238771, cenDec=0.574944617)
imageStack = []
for eachFile in files:
if os.path.exists(eachFile):
print 'Loading: ',os.path.basename(eachFile)
#fullFileName=os.path.join(dir,eachFile)
phList = pl.PhotList(eachFile)
baseSaveName,ext=os.path.splitext(os.path.basename(eachFile))
if detImage is True:
imSaveName=baseSaveName+'det.tif'
im = phList.getImageDet(wvlMin=wvlMin,wvlMax=wvlMax)
utils.plotArray(im)
mpl.imsave(fname=imSaveName,arr=im,colormap=mpl.cm.gnuplot2,origin='lower')
if eachFile==files[0]:
virtualImage=im
else:
virtualImage+=im
else:
imSaveName=baseSaveName+'.tif'
virtualImage.loadImage(phList,doStack=not medCombine,savePreStackImage=imSaveName,
wvlMin=wvlMin, wvlMax=wvlMax, doWeighted=doWeighted)
imageStack.append(virtualImage.image*virtualImage.expTimeWeights) #Only makes sense if medCombine==True, otherwise will be ignored
if medCombine==True:
medComImage = scipy.stats.nanmedian(np.array(imageStack), axis=0)
normMin = np.percentile(medComImage[np.isfinite(medComImage)],q=0.1)
normMax = np.percentile(medComImage[np.isfinite(medComImage)],q=99.9)
toDisplay = np.copy(medComImage)
toDisplay[~np.isfinite(toDisplay)] = 0
#utils.plotArray(toDisplay,normMin=normMin,normMax=normMax)
else:
#virtualImage.display(pclip=0.1)
medComImage = None
else:
            print 'File doesn\'t exist: ', eachFile
#Save the results
try:
output = open(saveFileName,'wb')
pickle.dump(virtualImage,output,-1)
output.close()
except:
warnings.warn('Unable to save results for some reason...')
return virtualImage, imageStack, medComImage
def checkRotationDirection():
'''
Just create two Crab images which should be rotated by a significant
amount with respect to each other on the detector, and see if they're
properly de-rotated
'''
plFN1 = FileName(run='PAL2012',date='20121211',tstamp='20121212-033323').photonList()
plFN2 = FileName(run='PAL2012',date='20121211',tstamp='20121212-045902').photonList()
vIm1 = rdi.RADecImage(pl.PhotList(plFN1))
vIm2 = rdi.RADecImage(pl.PhotList(plFN2))
vIm1.display()
vIm2.display()
return vIm1,vIm2
if __name__ == '__main__':
makeImageStack()
| gpl-2.0 |
apache/arrow | dev/archery/archery/integration/datagen.py | 3 | 46538 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple, OrderedDict
import binascii
import json
import os
import random
import tempfile
import numpy as np
from .util import frombytes, tobytes, random_bytes, random_utf8
def metadata_key_values(pairs):
return [{'key': k, 'value': v} for k, v in pairs]
class Field(object):
def __init__(self, name, *, nullable=True, metadata=None):
self.name = name
self.nullable = nullable
self.metadata = metadata or []
def get_json(self):
entries = [
('name', self.name),
('type', self._get_type()),
('nullable', self.nullable),
('children', self._get_children()),
]
dct = self._get_dictionary()
if dct:
entries.append(('dictionary', dct))
if self.metadata is not None and len(self.metadata) > 0:
entries.append(('metadata', metadata_key_values(self.metadata)))
return OrderedDict(entries)
def _get_dictionary(self):
return None
def _make_is_valid(self, size, null_probability=0.4):
if self.nullable:
return (np.random.random_sample(size) > null_probability
).astype(np.int8)
else:
return np.ones(size, dtype=np.int8)
class Column(object):
def __init__(self, name, count):
self.name = name
self.count = count
def __len__(self):
return self.count
def _get_children(self):
return []
def _get_buffers(self):
return []
def get_json(self):
entries = [
('name', self.name),
('count', self.count)
]
buffers = self._get_buffers()
entries.extend(buffers)
children = self._get_children()
if len(children) > 0:
entries.append(('children', children))
return OrderedDict(entries)
class PrimitiveField(Field):
def _get_children(self):
return []
class PrimitiveColumn(Column):
def __init__(self, name, count, is_valid, values):
super().__init__(name, count)
self.is_valid = is_valid
self.values = values
def _encode_value(self, x):
return x
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('DATA', list([self._encode_value(x) for x in self.values]))
]
class NullColumn(Column):
# This subclass is for readability only
pass
class NullField(PrimitiveField):
def __init__(self, name, metadata=None):
super().__init__(name, nullable=True,
metadata=metadata)
def _get_type(self):
return OrderedDict([('name', 'null')])
def generate_column(self, size, name=None):
return NullColumn(name or self.name, size)
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX
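# Note: ~TEST_INT_MAX == -(TEST_INT_MAX + 1) == -2**31, i.e. the int32 minimum.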
class IntegerField(PrimitiveField):
def __init__(self, name, is_signed, bit_width, *, nullable=True,
metadata=None,
min_value=TEST_INT_MIN,
max_value=TEST_INT_MAX):
super().__init__(name, nullable=nullable,
metadata=metadata)
self.is_signed = is_signed
self.bit_width = bit_width
self.min_value = min_value
self.max_value = max_value
def _get_generated_data_bounds(self):
if self.is_signed:
signed_iinfo = np.iinfo('int' + str(self.bit_width))
min_value, max_value = signed_iinfo.min, signed_iinfo.max
else:
unsigned_iinfo = np.iinfo('uint' + str(self.bit_width))
min_value, max_value = 0, unsigned_iinfo.max
lower_bound = max(min_value, self.min_value)
upper_bound = min(max_value, self.max_value)
return lower_bound, upper_bound
def _get_type(self):
return OrderedDict([
('name', 'int'),
('isSigned', self.is_signed),
('bitWidth', self.bit_width)
])
def generate_column(self, size, name=None):
lower_bound, upper_bound = self._get_generated_data_bounds()
return self.generate_range(size, lower_bound, upper_bound,
name=name, include_extremes=True)
def generate_range(self, size, lower, upper, name=None,
include_extremes=False):
values = np.random.randint(lower, upper, size=size, dtype=np.int64)
if include_extremes and size >= 2:
values[:2] = [lower, upper]
values = list(map(int if self.bit_width < 64 else str, values))
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class DateField(IntegerField):
DAY = 0
MILLISECOND = 1
# 1/1/1 to 12/31/9999
_ranges = {
DAY: [-719162, 2932896],
MILLISECOND: [-62135596800000, 253402214400000]
}
def __init__(self, name, unit, *, nullable=True, metadata=None):
bit_width = 32 if unit == self.DAY else 64
min_value, max_value = self._ranges[unit]
super().__init__(
name, True, bit_width,
nullable=nullable, metadata=metadata,
min_value=min_value, max_value=max_value
)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'date'),
('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
])
TIMEUNIT_NAMES = {
's': 'SECOND',
'ms': 'MILLISECOND',
'us': 'MICROSECOND',
'ns': 'NANOSECOND'
}
class TimeField(IntegerField):
BIT_WIDTHS = {
's': 32,
'ms': 32,
'us': 64,
'ns': 64
}
_ranges = {
's': [0, 86400],
'ms': [0, 86400000],
'us': [0, 86400000000],
'ns': [0, 86400000000000]
}
def __init__(self, name, unit='s', *, nullable=True,
metadata=None):
min_val, max_val = self._ranges[unit]
super().__init__(name, True, self.BIT_WIDTHS[unit],
nullable=nullable, metadata=metadata,
min_value=min_val, max_value=max_val)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'time'),
('unit', TIMEUNIT_NAMES[self.unit]),
('bitWidth', self.bit_width)
])
class TimestampField(IntegerField):
# 1/1/1 to 12/31/9999
_ranges = {
's': [-62135596800, 253402214400],
'ms': [-62135596800000, 253402214400000],
'us': [-62135596800000000, 253402214400000000],
# Physical range for int64, ~584 years and change
'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
}
def __init__(self, name, unit='s', tz=None, *, nullable=True,
metadata=None):
min_val, max_val = self._ranges[unit]
super().__init__(name, True, 64,
nullable=nullable,
metadata=metadata,
min_value=min_val,
max_value=max_val)
self.unit = unit
self.tz = tz
def _get_type(self):
fields = [
('name', 'timestamp'),
('unit', TIMEUNIT_NAMES[self.unit])
]
if self.tz is not None:
fields.append(('timezone', self.tz))
return OrderedDict(fields)
class DurationIntervalField(IntegerField):
def __init__(self, name, unit='s', *, nullable=True,
metadata=None):
        min_val, max_val = np.iinfo('int64').min, np.iinfo('int64').max
super().__init__(
name, True, 64,
nullable=nullable, metadata=metadata,
min_value=min_val, max_value=max_val)
self.unit = unit
def _get_type(self):
fields = [
('name', 'duration'),
('unit', TIMEUNIT_NAMES[self.unit])
]
return OrderedDict(fields)
class YearMonthIntervalField(IntegerField):
def __init__(self, name, *, nullable=True, metadata=None):
min_val, max_val = [-10000*12, 10000*12] # +/- 10000 years.
super().__init__(
name, True, 32,
nullable=nullable, metadata=metadata,
min_value=min_val, max_value=max_val)
def _get_type(self):
fields = [
('name', 'interval'),
('unit', 'YEAR_MONTH'),
]
return OrderedDict(fields)
class DayTimeIntervalField(PrimitiveField):
def __init__(self, name, *, nullable=True, metadata=None):
super().__init__(name,
nullable=True,
metadata=metadata)
@property
def numpy_type(self):
return object
def _get_type(self):
return OrderedDict([
('name', 'interval'),
('unit', 'DAY_TIME'),
])
def generate_column(self, size, name=None):
min_day_value, max_day_value = -10000*366, 10000*366
values = [{'days': random.randint(min_day_value, max_day_value),
'milliseconds': random.randint(-86400000, +86400000)}
for _ in range(size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class FloatingPointField(PrimitiveField):
def __init__(self, name, bit_width, *, nullable=True,
metadata=None):
super().__init__(name,
nullable=nullable,
metadata=metadata)
self.bit_width = bit_width
self.precision = {
16: 'HALF',
32: 'SINGLE',
64: 'DOUBLE'
}[self.bit_width]
@property
def numpy_type(self):
return 'float' + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'floatingpoint'),
('precision', self.precision)
])
def generate_column(self, size, name=None):
values = np.random.randn(size) * 1000
values = np.round(values, 3)
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
DECIMAL_PRECISION_TO_VALUE = {
key: (1 << (8 * i - 1)) - 1 for i, key in enumerate(
[1, 3, 5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 32, 34, 36,
40, 42, 44, 50, 60, 70],
start=1,
)
}
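# For example, the mapping above yields 127 ((1 << 7) - 1) for precision 1 and
# 32767 ((1 << 15) - 1) for precision 3: the largest signed values that fit in
# 1 and 2 bytes respectively.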
def decimal_range_from_precision(precision):
assert 1 <= precision <= 76
try:
max_value = DECIMAL_PRECISION_TO_VALUE[precision]
except KeyError:
return decimal_range_from_precision(precision - 1)
else:
return ~max_value, max_value
class DecimalField(PrimitiveField):
def __init__(self, name, precision, scale, bit_width, *,
nullable=True, metadata=None):
super().__init__(name, nullable=True,
metadata=metadata)
self.precision = precision
self.scale = scale
self.bit_width = bit_width
@property
def numpy_type(self):
return object
def _get_type(self):
return OrderedDict([
('name', 'decimal'),
('precision', self.precision),
('scale', self.scale),
('bitWidth', self.bit_width),
])
def generate_column(self, size, name=None):
min_value, max_value = decimal_range_from_precision(self.precision)
values = [random.randint(min_value, max_value) for _ in range(size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return DecimalColumn(name, size, is_valid, values, self.bit_width)
class DecimalColumn(PrimitiveColumn):
def __init__(self, name, count, is_valid, values, bit_width):
super().__init__(name, count, is_valid, values)
self.bit_width = bit_width
def _encode_value(self, x):
return str(x)
class BooleanField(PrimitiveField):
bit_width = 1
def _get_type(self):
return OrderedDict([('name', 'bool')])
@property
def numpy_type(self):
return 'bool'
def generate_column(self, size, name=None):
values = list(map(bool, np.random.randint(0, 2, size=size)))
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class FixedSizeBinaryField(PrimitiveField):
def __init__(self, name, byte_width, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable,
metadata=metadata)
self.byte_width = byte_width
@property
def numpy_type(self):
return object
@property
def column_class(self):
return FixedSizeBinaryColumn
def _get_type(self):
return OrderedDict([('name', 'fixedsizebinary'),
('byteWidth', self.byte_width)])
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
values.append(random_bytes(self.byte_width))
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class BinaryField(PrimitiveField):
@property
def numpy_type(self):
return object
@property
def column_class(self):
return BinaryColumn
def _get_type(self):
return OrderedDict([('name', 'binary')])
def _random_sizes(self, size):
return np.random.exponential(scale=4, size=size).astype(np.int32)
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
values = []
sizes = self._random_sizes(size)
for i, nbytes in enumerate(sizes):
if is_valid[i]:
values.append(random_bytes(nbytes))
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class StringField(BinaryField):
@property
def column_class(self):
return StringColumn
def _get_type(self):
return OrderedDict([('name', 'utf8')])
def generate_column(self, size, name=None):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
values.append(tobytes(random_utf8(K)))
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class LargeBinaryField(BinaryField):
@property
def column_class(self):
return LargeBinaryColumn
def _get_type(self):
return OrderedDict([('name', 'largebinary')])
class LargeStringField(StringField):
@property
def column_class(self):
return LargeStringColumn
def _get_type(self):
return OrderedDict([('name', 'largeutf8')])
class Schema(object):
def __init__(self, fields, metadata=None):
self.fields = fields
self.metadata = metadata
def get_json(self):
entries = [
('fields', [field.get_json() for field in self.fields])
]
if self.metadata is not None and len(self.metadata) > 0:
entries.append(('metadata', metadata_key_values(self.metadata)))
return OrderedDict(entries)
class _NarrowOffsetsMixin:
def _encode_offsets(self, offsets):
return list(map(int, offsets))
class _LargeOffsetsMixin:
def _encode_offsets(self, offsets):
# 64-bit offsets have to be represented as strings to roundtrip
# through JSON.
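        # For example, offsets [0, 3, 7] are emitted as ['0', '3', '7'].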
return list(map(str, offsets))
class _BaseBinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return frombytes(binascii.hexlify(x).upper())
def _get_buffers(self):
offset = 0
offsets = [0]
data = []
for i, v in enumerate(self.values):
if self.is_valid[i]:
offset += len(v)
else:
v = b""
offsets.append(offset)
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('OFFSET', self._encode_offsets(offsets)),
('DATA', data)
]
class _BaseStringColumn(_BaseBinaryColumn):
def _encode_value(self, x):
return frombytes(x)
class BinaryColumn(_BaseBinaryColumn, _NarrowOffsetsMixin):
pass
class StringColumn(_BaseStringColumn, _NarrowOffsetsMixin):
pass
class LargeBinaryColumn(_BaseBinaryColumn, _LargeOffsetsMixin):
pass
class LargeStringColumn(_BaseStringColumn, _LargeOffsetsMixin):
pass
class FixedSizeBinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return frombytes(binascii.hexlify(x).upper())
def _get_buffers(self):
data = []
for i, v in enumerate(self.values):
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('DATA', data)
]
class ListField(Field):
def __init__(self, name, value_field, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable,
metadata=metadata)
self.value_field = value_field
@property
def column_class(self):
return ListColumn
def _get_type(self):
return OrderedDict([
('name', 'list')
])
def _get_children(self):
return [self.value_field.get_json()]
def generate_column(self, size, name=None):
MAX_LIST_SIZE = 4
is_valid = self._make_is_valid(size)
list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(list_sizes[i])
offsets.append(offset)
# The offset now is the total number of elements in the child array
values = self.value_field.generate_column(offset)
if name is None:
name = self.name
return self.column_class(name, size, is_valid, offsets, values)
class LargeListField(ListField):
@property
def column_class(self):
return LargeListColumn
def _get_type(self):
return OrderedDict([
('name', 'largelist')
])
class _BaseListColumn(Column):
def __init__(self, name, count, is_valid, offsets, values):
super().__init__(name, count)
self.is_valid = is_valid
self.offsets = offsets
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', self._encode_offsets(self.offsets))
]
def _get_children(self):
return [self.values.get_json()]
class ListColumn(_BaseListColumn, _NarrowOffsetsMixin):
pass
class LargeListColumn(_BaseListColumn, _LargeOffsetsMixin):
pass
class MapField(Field):
def __init__(self, name, key_field, item_field, *, nullable=True,
metadata=None, keys_sorted=False, entries_name='entries'):
super().__init__(name, nullable=nullable,
metadata=metadata)
assert not key_field.nullable
self.key_field = key_field
self.item_field = item_field
self.pair_field = StructField(entries_name, [key_field, item_field],
nullable=False)
self.keys_sorted = keys_sorted
def _get_type(self):
return OrderedDict([
('name', 'map'),
('keysSorted', self.keys_sorted)
])
def _get_children(self):
return [self.pair_field.get_json()]
def generate_column(self, size, name=None):
MAX_MAP_SIZE = 4
is_valid = self._make_is_valid(size)
map_sizes = np.random.randint(0, MAX_MAP_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(map_sizes[i])
offsets.append(offset)
# The offset now is the total number of elements in the child array
pairs = self.pair_field.generate_column(offset)
if name is None:
name = self.name
return MapColumn(name, size, is_valid, offsets, pairs)
class MapColumn(Column):
def __init__(self, name, count, is_valid, offsets, pairs):
super().__init__(name, count)
self.is_valid = is_valid
self.offsets = offsets
self.pairs = pairs
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', list(self.offsets))
]
def _get_children(self):
return [self.pairs.get_json()]
class FixedSizeListField(Field):
def __init__(self, name, value_field, list_size, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable,
metadata=metadata)
self.value_field = value_field
self.list_size = list_size
def _get_type(self):
return OrderedDict([
('name', 'fixedsizelist'),
('listSize', self.list_size)
])
def _get_children(self):
return [self.value_field.get_json()]
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
values = self.value_field.generate_column(size * self.list_size)
if name is None:
name = self.name
return FixedSizeListColumn(name, size, is_valid, values)
class FixedSizeListColumn(Column):
def __init__(self, name, count, is_valid, values):
super().__init__(name, count)
self.is_valid = is_valid
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [self.values.get_json()]
class StructField(Field):
def __init__(self, name, fields, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable,
metadata=metadata)
self.fields = fields
def _get_type(self):
return OrderedDict([
('name', 'struct')
])
def _get_children(self):
return [field.get_json() for field in self.fields]
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
field_values = [field.generate_column(size) for field in self.fields]
if name is None:
name = self.name
return StructColumn(name, size, is_valid, field_values)
class _BaseUnionField(Field):
def __init__(self, name, fields, type_ids=None, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable, metadata=metadata)
if type_ids is None:
            type_ids = list(range(len(fields)))
else:
assert len(fields) == len(type_ids)
self.fields = fields
self.type_ids = type_ids
assert all(x >= 0 for x in self.type_ids)
def _get_type(self):
return OrderedDict([
('name', 'union'),
('mode', self.mode),
('typeIds', self.type_ids),
])
def _get_children(self):
return [field.get_json() for field in self.fields]
def _make_type_ids(self, size):
return np.random.choice(self.type_ids, size)
class SparseUnionField(_BaseUnionField):
mode = 'SPARSE'
def generate_column(self, size, name=None):
array_type_ids = self._make_type_ids(size)
field_values = [field.generate_column(size) for field in self.fields]
if name is None:
name = self.name
return SparseUnionColumn(name, size, array_type_ids, field_values)
class DenseUnionField(_BaseUnionField):
mode = 'DENSE'
def generate_column(self, size, name=None):
# Reverse mapping {logical type id => physical child id}
child_ids = [None] * (max(self.type_ids) + 1)
for i, type_id in enumerate(self.type_ids):
child_ids[type_id] = i
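        # e.g. with type_ids == [10, 20] this gives child_ids[10] == 0 and
        # child_ids[20] == 1 (slots for unused type ids stay None).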
array_type_ids = self._make_type_ids(size)
offsets = []
child_sizes = [0] * len(self.fields)
for i in range(size):
child_id = child_ids[array_type_ids[i]]
offset = child_sizes[child_id]
offsets.append(offset)
child_sizes[child_id] = offset + 1
field_values = [
field.generate_column(child_size)
for field, child_size in zip(self.fields, child_sizes)]
if name is None:
name = self.name
return DenseUnionColumn(name, size, array_type_ids, offsets,
field_values)
class Dictionary(object):
def __init__(self, id_, field, size, name=None, ordered=False):
self.id_ = id_
self.field = field
self.values = field.generate_column(size=size, name=name)
self.ordered = ordered
def __len__(self):
return len(self.values)
def get_json(self):
dummy_batch = RecordBatch(len(self.values), [self.values])
return OrderedDict([
('id', self.id_),
('data', dummy_batch.get_json())
])
class DictionaryField(Field):
def __init__(self, name, index_field, dictionary, *, nullable=True,
metadata=None):
super().__init__(name, nullable=nullable,
metadata=metadata)
assert index_field.name == ''
assert isinstance(index_field, IntegerField)
assert isinstance(dictionary, Dictionary)
self.index_field = index_field
self.dictionary = dictionary
def _get_type(self):
return self.dictionary.field._get_type()
def _get_children(self):
return self.dictionary.field._get_children()
def _get_dictionary(self):
return OrderedDict([
('id', self.dictionary.id_),
('indexType', self.index_field._get_type()),
('isOrdered', self.dictionary.ordered)
])
def generate_column(self, size, name=None):
if name is None:
name = self.name
return self.index_field.generate_range(size, 0, len(self.dictionary),
name=name)
ExtensionType = namedtuple(
'ExtensionType', ['extension_name', 'serialized', 'storage_field'])
class ExtensionField(Field):
def __init__(self, name, extension_type, *, nullable=True, metadata=None):
metadata = (metadata or []) + [
('ARROW:extension:name', extension_type.extension_name),
('ARROW:extension:metadata', extension_type.serialized),
]
super().__init__(name, nullable=nullable, metadata=metadata)
self.extension_type = extension_type
def _get_type(self):
return self.extension_type.storage_field._get_type()
def _get_children(self):
return self.extension_type.storage_field._get_children()
def _get_dictionary(self):
return self.extension_type.storage_field._get_dictionary()
def generate_column(self, size, name=None):
if name is None:
name = self.name
return self.extension_type.storage_field.generate_column(size, name)
class StructColumn(Column):
def __init__(self, name, count, is_valid, field_values):
super().__init__(name, count)
self.is_valid = is_valid
self.field_values = field_values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class SparseUnionColumn(Column):
def __init__(self, name, count, type_ids, field_values):
super().__init__(name, count)
self.type_ids = type_ids
self.field_values = field_values
def _get_buffers(self):
return [
('TYPE_ID', [int(v) for v in self.type_ids])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class DenseUnionColumn(Column):
def __init__(self, name, count, type_ids, offsets, field_values):
super().__init__(name, count)
self.type_ids = type_ids
self.offsets = offsets
self.field_values = field_values
def _get_buffers(self):
return [
('TYPE_ID', [int(v) for v in self.type_ids]),
('OFFSET', [int(v) for v in self.offsets]),
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class RecordBatch(object):
def __init__(self, count, columns):
self.count = count
self.columns = columns
def get_json(self):
return OrderedDict([
('count', self.count),
('columns', [col.get_json() for col in self.columns])
])
class File(object):
def __init__(self, name, schema, batches, dictionaries=None,
skip=None, path=None):
self.name = name
self.schema = schema
self.dictionaries = dictionaries or []
self.batches = batches
self.skip = set()
self.path = path
if skip:
self.skip.update(skip)
def get_json(self):
entries = [
('schema', self.schema.get_json())
]
if len(self.dictionaries) > 0:
entries.append(('dictionaries',
[dictionary.get_json()
for dictionary in self.dictionaries]))
entries.append(('batches', [batch.get_json()
for batch in self.batches]))
return OrderedDict(entries)
def write(self, path):
with open(path, 'wb') as f:
f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
self.path = path
def skip_category(self, category):
"""Skip this test for the given category.
Category should be SKIP_ARROW or SKIP_FLIGHT.
"""
self.skip.add(category)
return self
def get_field(name, type_, **kwargs):
if type_ == 'binary':
return BinaryField(name, **kwargs)
elif type_ == 'utf8':
return StringField(name, **kwargs)
elif type_ == 'largebinary':
return LargeBinaryField(name, **kwargs)
elif type_ == 'largeutf8':
return LargeStringField(name, **kwargs)
elif type_.startswith('fixedsizebinary_'):
byte_width = int(type_.split('_')[1])
return FixedSizeBinaryField(name, byte_width=byte_width, **kwargs)
dtype = np.dtype(type_)
if dtype.kind in ('i', 'u'):
signed = dtype.kind == 'i'
bit_width = dtype.itemsize * 8
return IntegerField(name, signed, bit_width, **kwargs)
elif dtype.kind == 'f':
bit_width = dtype.itemsize * 8
return FloatingPointField(name, bit_width, **kwargs)
elif dtype.kind == 'b':
return BooleanField(name, **kwargs)
else:
raise TypeError(dtype)
def _generate_file(name, fields, batch_sizes, dictionaries=None, skip=None,
metadata=None):
schema = Schema(fields, metadata=metadata)
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(RecordBatch(size, columns))
return File(name, schema, batches, dictionaries, skip=skip)
def generate_custom_metadata_case():
def meta(items):
# Generate a simple block of metadata where each value is '{}'.
# Keys are delimited by whitespace in `items`.
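        # e.g. meta('a b') returns [('a', '{}'), ('b', '{}')].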
return [(k, '{}') for k in items.split()]
fields = [
get_field('sort_of_pandas', 'int8', metadata=meta('pandas')),
get_field('lots_of_meta', 'int8', metadata=meta('a b c d .. w x y z')),
get_field(
'unregistered_extension', 'int8',
metadata=[
('ARROW:extension:name', '!nonexistent'),
('ARROW:extension:metadata', ''),
('ARROW:integration:allow_unregistered_extension', 'true'),
]),
ListField('list_with_odd_values',
get_field('item', 'int32', metadata=meta('odd_values'))),
]
batch_sizes = [1]
return _generate_file('custom_metadata', fields, batch_sizes,
metadata=meta('schema_custom_0 schema_custom_1'))
def generate_duplicate_fieldnames_case():
fields = [
get_field('ints', 'int8'),
get_field('ints', 'int32'),
StructField('struct', [get_field('', 'int32'), get_field('', 'utf8')]),
]
batch_sizes = [1]
return _generate_file('duplicate_fieldnames', fields, batch_sizes)
def generate_primitive_case(batch_sizes, name='primitive'):
types = ['bool', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'binary', 'utf8',
'fixedsizebinary_19', 'fixedsizebinary_120']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, nullable=True))
fields.append(get_field(type_ + "_nonnullable", type_, nullable=False))
return _generate_file(name, fields, batch_sizes)
def generate_primitive_large_offsets_case(batch_sizes):
types = ['largebinary', 'largeutf8']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, nullable=True))
fields.append(get_field(type_ + "_nonnullable", type_, nullable=False))
return _generate_file('primitive_large_offsets', fields, batch_sizes)
def generate_null_case(batch_sizes):
# Interleave null with non-null types to ensure the appropriate number of
# buffers (0) is read and written
fields = [
NullField(name='f0'),
get_field('f1', 'int32'),
NullField(name='f2'),
get_field('f3', 'float64'),
NullField(name='f4')
]
return _generate_file('null', fields, batch_sizes)
def generate_null_trivial_case(batch_sizes):
# Generate a case with no buffers
fields = [
NullField(name='f0'),
]
return _generate_file('null_trivial', fields, batch_sizes)
def generate_decimal128_case():
fields = [
DecimalField(name='f{}'.format(i), precision=precision, scale=2,
bit_width=128)
for i, precision in enumerate(range(3, 39))
]
possible_batch_sizes = 7, 10
batch_sizes = [possible_batch_sizes[i % 2] for i in range(len(fields))]
    # 'decimal' is the original name for the test, and it must match the
    # provided "gold" files that test backwards compatibility, so they
    # can be appropriately skipped.
return _generate_file('decimal', fields, batch_sizes)
def generate_decimal256_case():
fields = [
DecimalField(name='f{}'.format(i), precision=precision, scale=5,
bit_width=256)
for i, precision in enumerate(range(37, 70))
]
possible_batch_sizes = 7, 10
batch_sizes = [possible_batch_sizes[i % 2] for i in range(len(fields))]
return _generate_file('decimal256', fields, batch_sizes)
def generate_datetime_case():
fields = [
DateField('f0', DateField.DAY),
DateField('f1', DateField.MILLISECOND),
TimeField('f2', 's'),
TimeField('f3', 'ms'),
TimeField('f4', 'us'),
TimeField('f5', 'ns'),
TimestampField('f6', 's'),
TimestampField('f7', 'ms'),
TimestampField('f8', 'us'),
TimestampField('f9', 'ns'),
TimestampField('f10', 'ms', tz=None),
TimestampField('f11', 's', tz='UTC'),
TimestampField('f12', 'ms', tz='US/Eastern'),
TimestampField('f13', 'us', tz='Europe/Paris'),
TimestampField('f14', 'ns', tz='US/Pacific'),
]
batch_sizes = [7, 10]
return _generate_file("datetime", fields, batch_sizes)
def generate_interval_case():
fields = [
DurationIntervalField('f1', 's'),
DurationIntervalField('f2', 'ms'),
DurationIntervalField('f3', 'us'),
DurationIntervalField('f4', 'ns'),
YearMonthIntervalField('f5'),
DayTimeIntervalField('f6'),
]
batch_sizes = [7, 10]
return _generate_file("interval", fields, batch_sizes)
def generate_map_case():
fields = [
MapField('map_nullable', get_field('key', 'utf8', nullable=False),
get_field('value', 'int32')),
]
batch_sizes = [7, 10]
return _generate_file("map", fields, batch_sizes)
def generate_non_canonical_map_case():
fields = [
MapField('map_other_names',
get_field('some_key', 'utf8', nullable=False),
get_field('some_value', 'int32'),
entries_name='some_entries'),
]
batch_sizes = [7]
return _generate_file("map_non_canonical", fields, batch_sizes)
def generate_nested_case():
fields = [
ListField('list_nullable', get_field('item', 'int32')),
FixedSizeListField('fixedsizelist_nullable',
get_field('item', 'int32'), 4),
StructField('struct_nullable', [get_field('f1', 'int32'),
get_field('f2', 'utf8')]),
# Fails on Go (ARROW-8452)
# ListField('list_nonnullable', get_field('item', 'int32'),
# nullable=False),
]
batch_sizes = [7, 10]
return _generate_file("nested", fields, batch_sizes)
def generate_recursive_nested_case():
fields = [
ListField('lists_list',
ListField('inner_list', get_field('item', 'int16'))),
ListField('structs_list',
StructField('inner_struct',
[get_field('f1', 'int32'),
get_field('f2', 'utf8')])),
]
batch_sizes = [7, 10]
return _generate_file("recursive_nested", fields, batch_sizes)
def generate_nested_large_offsets_case():
fields = [
LargeListField('large_list_nullable', get_field('item', 'int32')),
LargeListField('large_list_nonnullable',
get_field('item', 'int32'), nullable=False),
LargeListField('large_list_nested',
ListField('inner_list', get_field('item', 'int16'))),
]
batch_sizes = [0, 13]
return _generate_file("nested_large_offsets", fields, batch_sizes)
def generate_unions_case():
fields = [
SparseUnionField('sparse', [get_field('f1', 'int32'),
get_field('f2', 'utf8')],
type_ids=[5, 7]),
DenseUnionField('dense', [get_field('f1', 'int16'),
get_field('f2', 'binary')],
type_ids=[10, 20]),
SparseUnionField('sparse', [get_field('f1', 'float32', nullable=False),
get_field('f2', 'bool')],
type_ids=[5, 7], nullable=False),
DenseUnionField('dense', [get_field('f1', 'uint8', nullable=False),
get_field('f2', 'uint16'),
NullField('f3')],
type_ids=[42, 43, 44], nullable=False),
]
batch_sizes = [0, 11]
return _generate_file("union", fields, batch_sizes)
def generate_dictionary_case():
dict0 = Dictionary(0, StringField('dictionary1'), size=10, name='DICT0')
dict1 = Dictionary(1, StringField('dictionary1'), size=5, name='DICT1')
dict2 = Dictionary(2, get_field('dictionary2', 'int64'),
size=50, name='DICT2')
fields = [
DictionaryField('dict0', get_field('', 'int8'), dict0),
DictionaryField('dict1', get_field('', 'int32'), dict1),
DictionaryField('dict2', get_field('', 'int16'), dict2)
]
batch_sizes = [7, 10]
return _generate_file("dictionary", fields, batch_sizes,
dictionaries=[dict0, dict1, dict2])
def generate_dictionary_unsigned_case():
dict0 = Dictionary(0, StringField('dictionary0'), size=5, name='DICT0')
dict1 = Dictionary(1, StringField('dictionary1'), size=5, name='DICT1')
dict2 = Dictionary(2, StringField('dictionary2'), size=5, name='DICT2')
# TODO: JavaScript does not support uint64 dictionary indices, so disabled
# for now
# dict3 = Dictionary(3, StringField('dictionary3'), size=5, name='DICT3')
fields = [
DictionaryField('f0', get_field('', 'uint8'), dict0),
DictionaryField('f1', get_field('', 'uint16'), dict1),
DictionaryField('f2', get_field('', 'uint32'), dict2),
# DictionaryField('f3', get_field('', 'uint64'), dict3)
]
batch_sizes = [7, 10]
return _generate_file("dictionary_unsigned", fields, batch_sizes,
dictionaries=[dict0, dict1, dict2])
def generate_nested_dictionary_case():
dict0 = Dictionary(0, StringField('str'), size=10, name='DICT0')
list_of_dict = ListField(
'list',
DictionaryField('str_dict', get_field('', 'int8'), dict0))
dict1 = Dictionary(1, list_of_dict, size=30, name='DICT1')
struct_of_dict = StructField('struct', [
DictionaryField('str_dict_a', get_field('', 'int8'), dict0),
DictionaryField('str_dict_b', get_field('', 'int8'), dict0)
])
dict2 = Dictionary(2, struct_of_dict, size=30, name='DICT2')
fields = [
DictionaryField('list_dict', get_field('', 'int8'), dict1),
DictionaryField('struct_dict', get_field('', 'int8'), dict2)
]
batch_sizes = [10, 13]
return _generate_file("nested_dictionary", fields, batch_sizes,
dictionaries=[dict0, dict1, dict2])
def generate_extension_case():
dict0 = Dictionary(0, StringField('dictionary0'), size=5, name='DICT0')
uuid_type = ExtensionType('uuid', 'uuid-serialized',
FixedSizeBinaryField('', 16))
dict_ext_type = ExtensionType(
'dict-extension', 'dict-extension-serialized',
DictionaryField('str_dict', get_field('', 'int8'), dict0))
fields = [
ExtensionField('uuids', uuid_type),
ExtensionField('dict_exts', dict_ext_type),
]
batch_sizes = [0, 13]
return _generate_file("extension", fields, batch_sizes,
dictionaries=[dict0])
def get_generated_json_files(tempdir=None):
tempdir = tempdir or tempfile.mkdtemp(prefix='arrow-integration-')
def _temp_path():
return
file_objs = [
generate_primitive_case([], name='primitive_no_batches'),
generate_primitive_case([17, 20], name='primitive'),
generate_primitive_case([0, 0, 0], name='primitive_zerolength'),
generate_primitive_large_offsets_case([17, 20])
.skip_category('Go')
.skip_category('JS'),
generate_null_case([10, 0])
.skip_category('Go') # TODO(ARROW-7901)
.skip_category('JS'), # TODO(ARROW-7900)
generate_null_trivial_case([0, 0])
.skip_category('Go') # TODO(ARROW-7901)
.skip_category('JS'), # TODO(ARROW-7900)
generate_decimal128_case()
.skip_category('Rust'),
generate_decimal256_case()
.skip_category('Go') # TODO(ARROW-7948): Decimal + Go
.skip_category('JS')
.skip_category('Rust'),
generate_datetime_case(),
generate_interval_case()
.skip_category('JS') # TODO(ARROW-5239): Intervals + JS
.skip_category('Rust'),
generate_map_case()
.skip_category('Rust'),
generate_non_canonical_map_case()
.skip_category('Java') # TODO(ARROW-8715)
.skip_category('JS') # TODO(ARROW-8716)
.skip_category('Rust'),
generate_nested_case(),
generate_recursive_nested_case()
.skip_category('Go'), # TODO(ARROW-8453)
generate_nested_large_offsets_case()
.skip_category('Go')
.skip_category('JS')
.skip_category('Rust'),
generate_unions_case()
.skip_category('Go')
.skip_category('JS')
.skip_category('Rust'),
generate_custom_metadata_case()
.skip_category('JS'),
generate_duplicate_fieldnames_case()
.skip_category('Go')
.skip_category('JS'),
# TODO(ARROW-3039, ARROW-5267): Dictionaries in GO
generate_dictionary_case()
.skip_category('Go'),
generate_dictionary_unsigned_case()
.skip_category('Go') # TODO(ARROW-9378)
.skip_category('Java'), # TODO(ARROW-9377)
generate_nested_dictionary_case()
.skip_category('Go')
.skip_category('Java') # TODO(ARROW-7779)
.skip_category('JS')
.skip_category('Rust'),
generate_extension_case()
.skip_category('Go') # TODO(ARROW-3039): requires dictionaries
.skip_category('JS')
.skip_category('Rust'),
]
generated_paths = []
for file_obj in file_objs:
out_path = os.path.join(tempdir, 'generated_' +
file_obj.name + '.json')
file_obj.write(out_path)
generated_paths.append(file_obj)
return generated_paths
| apache-2.0 |
rishikksh20/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
bzamecnik/ml | instrument-classification/predict.py | 2 | 4699 | """
Allows to classify musical instrument families from audio clips using a trained
model.
Input:
- audio clip (WAV/FLAC), 2sec, 44100 Hz sampling rate, mono
- model files (architecture, weights)
Output: instrument family [brass, guitar, organ, piano, pipe, reed, strings]
"""
import argparse
import keras
from keras.utils import np_utils
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import numpy as np
import pandas as pd
import soundfile as sf
jsonpickle_numpy.register_handlers()
class InstrumentClassifier():
def __init__(self, model_dir):
self.model_dir = model_dir
def load_model(arch_file, weights_file):
"""
Load Keras model from files - YAML architecture, HDF5 weights.
"""
with open(arch_file) as f:
model = keras.models.model_from_yaml(f.read())
model.load_weights(weights_file)
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
def load_model_from_dir(model_dir):
"""
Load Keras model stored into a given directory with some file-name
conventions. YAML architecture, HDF5 weights.
"""
return load_model(
model_dir + '/model_arch.yaml',
model_dir + '/model_weights.h5')
self.model = load_model_from_dir(model_dir)
with open(model_dir + '/preproc_transformers.json', 'r') as f:
self.instr_family_le, self.scaler, self.ch = \
jsonpickle.decode(f.read())
def load_features(self, audio_file):
def stereo_to_mono(x):
# stereo to mono
if len(x.shape) > 1 and x.shape[1] > 1:
print('Converting stereo to mono')
x = x.mean(axis=1)
return x
def cut_or_pad_to_length(x, duration, fs):
desired_length = int(round(duration * fs))
length = len(x)
diff = length - desired_length
abs_diff = abs(diff)
if diff < 0:
print('Padding')
# put the short signal in the middle
pad_before = abs_diff // 2
pad_after = abs_diff - pad_before
x = np.lib.pad(x, (pad_before, pad_after), 'constant')
            elif diff > 0:
print('Cutting')
# cut the beginning
x = x[0:desired_length]
return x
def adjust_input(x, fs):
x = stereo_to_mono(x)
x = cut_or_pad_to_length(x, 2.0, fs)
return x
x, fs = sf.read(audio_file)
x = adjust_input(x, fs)
# pitchgram
x_features = self.ch.transform(x)
if self.scaler is not None:
x_features = self.scaler.transform(x_features.reshape(1, -1)) \
# 1 data point with 2D features
x_features = x_features.reshape(1, *x_features.shape)
return x_features
def predict_class_label(self, audio_file):
x_features = self.load_features(audio_file)
instrument_class = np_utils.probas_to_classes(self.model.predict(x_features, verbose=0))[0]
label = self.instr_family_le.inverse_transform(instrument_class)
return label
def predict_probabilities(self, audio_file):
x_features = self.load_features(audio_file)
proba = self.model.predict(x_features, verbose=0).flatten()
df = pd.DataFrame({
'probability': proba,
'class': np.arange(len(proba)),
'label': self.instr_family_le.classes_})
df.sort_values('probability', ascending=False, inplace=True)
df.set_index('class', inplace=True)
return df
def class_label_from_probabilities(self, probabilities):
return probabilities.iloc[0]['label']
def parse_args():
parser = argparse.ArgumentParser(
description='Classifies music instrument family from an audio clip.')
parser.add_argument('audio_file', metavar='AUDIO_FILE', type=str,
help='audio file (WAV, FLAC)')
parser.add_argument('-m', '--model-dir', type=str,
help='directory with model architecture, weights and preprocessing transformers')
parser.add_argument('-p', '--proba', action='store_true', default=False,
help='print probabilities, not just class')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
model = InstrumentClassifier(args.model_dir)
if args.proba:
print(model.predict_probabilities(args.audio_file))
else:
print(model.predict_class_label(args.audio_file))
| mit |
cbmoore/statsmodels | statsmodels/examples/ex_lowess.py | 34 | 2827 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:26:06 2011
Author: Chris Jordan Squire
extracted from test suite by josef-pktd
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# this is just to check direct import
import statsmodels.nonparametric.smoothers_lowess
statsmodels.nonparametric.smoothers_lowess.lowess
x = np.arange(20.)
#standard normal noise
noise = np.array([-0.76741118, -0.30754369,
0.39950921, -0.46352422, -1.67081778,
0.6595567 , 0.66367639, -2.04388585,
0.8123281 , 1.45977518,
1.21428038, 1.29296866, 0.78028477,
-0.2402853 , -0.21721302,
0.24549405, 0.25987014, -0.90709034,
-1.45688216, -0.31780505])
y = x + noise
expected_lowess = np.array([[ 0. , -0.58337912],
[ 1. , 0.61951246],
[ 2. , 1.82221628],
[ 3. , 3.02536876],
[ 4. , 4.22667951],
[ 5. , 5.42387723],
[ 6. , 6.60834945],
[ 7. , 7.7797691 ],
[ 8. , 8.91824348],
[ 9. , 9.94997506],
[ 10. , 10.89697569],
[ 11. , 11.78746276],
[ 12. , 12.62356492],
[ 13. , 13.41538492],
[ 14. , 14.15745254],
[ 15. , 14.92343948],
[ 16. , 15.70019862],
[ 17. , 16.48167846],
[ 18. , 17.26380699],
[ 19. , 18.0466769 ]])
actual_lowess = lowess(y, x)
print(actual_lowess)
print(np.max(np.abs(actual_lowess-expected_lowess)))
plt.plot(y, 'o')
plt.plot(actual_lowess[:,1])
plt.plot(expected_lowess[:,1])
import os.path
import statsmodels.nonparametric.tests.results
rpath = os.path.split(statsmodels.nonparametric.tests.results.__file__)[0]
rfile = os.path.join(rpath, 'test_lowess_frac.csv')
test_data = np.genfromtxt(open(rfile, 'rb'),
delimiter = ',', names = True)
expected_lowess_23 = np.array([test_data['x'], test_data['out_2_3']]).T
expected_lowess_15 = np.array([test_data['x'], test_data['out_1_5']]).T
actual_lowess_23 = lowess(test_data['y'], test_data['x'] ,frac = 2./3)
actual_lowess_15 = lowess(test_data['y'], test_data['x'] ,frac = 1./5)
#plt.show()
| bsd-3-clause |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue ([email protected]) and the Agg backend by John
Hunter ([email protected])
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license (PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
| agpl-3.0 |
gef756/statsmodels | examples/python/glm_formula.py | 33 | 1547 |
## Generalized Linear Models (Formula)
# This notebook illustrates how you can use R-style formulas to fit Generalized Linear Models.
#
# To begin, we load the ``Star98`` dataset and we construct a formula and pre-process the data:
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf
star98 = sm.datasets.star98.load_pandas().data
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
'PERSPENK', 'PTRATIO', 'PCTAF']]
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
# Then, we fit the GLM model:
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary()
# Finally, we define a function to perform a customized data transformation using the formula framework:
def double_it(x):
return 2 * x
formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
# As expected, the coefficient for ``double_it(LOWINC)`` in the second model is half the size of the ``LOWINC`` coefficient from the first model:
print(mod1.params[1])
print(mod2.params[1] * 2)
| bsd-3-clause |
xguse/ggplot | ggplot/tests/test_geom_bar.py | 12 | 2959 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
import datetime
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 10),
"y": np.arange(0, 10),
"z": np.arange(0, 10),
"a": [1,1,1,1,1,2,2,2,3,3],
"b": ["a","a","a","a","a","b","b","b","c","c"]
})
df['facets'] = np.where(df.x > 4, 'over', 'under')
df['facets2'] = np.where((df.x % 2) == 0, 'even', 'uneven')
return df
@cleanup
def test_labels_auto():
df = pd.DataFrame({ "y" : [3.362, 1.2, 3.424, 2.574, 0.679],
"x" : ["BF","BF","Flann","FastMatch","FastMatch2"],
"c" : ["a", "b", "a", "a","a"]})
p = ggplot(df, aes(x = 'x', y = 'y', fill="c"))
gg = p + geom_bar(stat="bar")
assert_same_ggplot(gg, "labels_auto")
@cleanup
def test_labels_manual():
df = pd.DataFrame({ "y" : [3.362, 1.2, 3.424, 2.574, 0.679],
"x" : ["BF","BF","Flann","FastMatch","FastMatch2"],
"c" : ["a", "b", "a", "a","a"]})
p = ggplot(df, aes(x = 'x', y = 'y', fill="c"))
gg2 = p + geom_bar(stat="bar", labels=["BF","Flann","FastMatch"])
assert_same_ggplot(gg2, "labels_manual")
@cleanup
def test_facet_grid_discrete():
df = _build_testing_df()
gg = ggplot(aes(x='a', y='y', fill='y'), data=df)
assert_same_ggplot(gg + geom_bar(stat='bar') + facet_grid(x="facets", y="facets2"),
"faceting_grid_discrete")
@cleanup
def test_facet_wrap_discrete():
df = _build_testing_df()
gg = ggplot(aes(x='a', y='y'), data=df)
assert_same_ggplot(gg + geom_bar(stat='bar') + facet_wrap(x="facets"), "faceting_wrap_discrete")
@cleanup
def test_facet_colors():
gg = ggplot(diamonds, aes(x = 'clarity', fill = 'cut', color='cut')) +\
stat_bin(binwidth=1200) + facet_wrap("color")
assert_same_ggplot(gg, "facet_colors")
# @cleanup
# def test_date_hist():
# dates = [datetime.date(2014, 3, i) for i in range(1, 31)]
# gg = ggplot(pd.DataFrame({"x": dates}), aes(x='x')) + geom_histogram()
# assert_same_ggplot(gg, "geom_hist_date")
@cleanup
def test_color_hist():
data = { "a" : np.concatenate([np.repeat("a", int(3.262*100)),
np.repeat("b", int(2.574*100))]),
"c" : np.concatenate([np.repeat("c1", int(3.262*40)+1),
np.repeat("c2", int(3.262*60)),
np.repeat("c1", int(2.574*55)+1),
np.repeat("c2", int(2.574*45))])}
df2 = pd.DataFrame(data)
gg = ggplot(df2, aes(x = 'a', fill="c")) + geom_histogram()
assert_same_ggplot(gg, "color_hist")
| bsd-2-clause |
giumas/python-acoustics | acoustics/signal.py | 1 | 40055 | """
Signal
======
The signal module contains all kinds of signal-processing-related functions.
.. inheritance-diagram:: acoustics.signal
Filtering
*********
.. autoclass:: Filterbank
.. autofunction:: bandpass_filter
.. autofunction:: octave_filter
.. autofunction:: bandpass
.. autofunction:: lowpass
.. autofunction:: highpass
.. autofunction:: octavepass
.. autofunction:: convolve
Windowing
*********
.. autofunction:: window_scaling_factor
.. autofunction:: apply_window
Spectra
*******
Different types of spectra exist.
.. autofunction:: amplitude_spectrum
.. autofunction:: auto_spectrum
.. autofunction:: power_spectrum
.. autofunction:: density_spectrum
.. autofunction:: angle_spectrum
.. autofunction:: phase_spectrum
Frequency bands
***************
.. autoclass:: Band
.. autoclass:: Frequencies
.. autoclass:: EqualBand
.. autoclass:: OctaveBand
.. autofunction:: integrate_bands
.. autofunction:: octaves
.. autofunction:: third_octaves
Hilbert transform
*****************
.. autofunction:: amplitude_envelope
.. autofunction:: instantaneous_phase
.. autofunction:: instantaneous_frequency
Conversion
**********
.. autofunction:: decibel_to_neper
.. autofunction:: neper_to_decibel
Other
*****
.. autofunction:: isolate
.. autofunction:: zero_crossings
.. autofunction:: rms
.. autofunction:: ms
.. autofunction:: normalize
.. autofunction:: ir2fr
.. autofunction:: wvd
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import spdiags
from scipy.signal import butter, lfilter, freqz, filtfilt, sosfilt
import acoustics.octave
#from acoustics.octave import REFERENCE
import acoustics.bands
from scipy.signal import hilbert
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES,
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES)
try:
from pyfftw.interfaces.numpy_fft import rfft
except ImportError:
from numpy.fft import rfft
def bandpass_filter(lowcut, highcut, fs, order=8, output='sos'):
"""Band-pass filter.
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
:returns: Returned value depends on `output`.
A Butterworth filter is used.
.. seealso:: :func:`scipy.signal.butter`.
"""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
output = butter(order/2, [low, high], btype='band', output=output)
return output
def bandpass(signal, lowcut, highcut, fs, order=8, zero_phase=False):
"""Filter signal with band-pass filter.
:param signal: Signal
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`bandpass_filter` for the filter that is used.
"""
sos = bandpass_filter(lowcut, highcut, fs, order, output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
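# Illustrative usage sketch (comment only, not executed on import): band-limit a
# second of white noise to the 100--2000 Hz range at a 44.1 kHz sample rate.
#
#     >>> fs = 44100
#     >>> noise = np.random.randn(fs)
#     >>> filtered = bandpass(noise, 100.0, 2000.0, fs, order=8)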
def lowpass(signal, cutoff, fs, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='low', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def highpass(signal, cutoff, fs, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='high', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def octave_filter(center, fs, fraction, order=8, output='sos'):
"""Fractional-octave band-pass filter.
:param center: Centerfrequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
A Butterworth filter is used.
.. seealso:: :func:`bandpass_filter`
"""
ob = OctaveBand(center=center, fraction=fraction)
return bandpass_filter(ob.lower[0], ob.upper[0], fs, order, output=output)
def octavepass(signal, center, fs, fraction, order=8, zero_phase=True):
"""Filter signal with fractional-octave bandpass filter.
:param signal: Signal
:param center: Centerfrequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`octave_filter`
"""
sos = octave_filter(center, fs, fraction, order)
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def convolve(signal, ltv, mode='full'):
"""
Perform convolution of signal with linear time-variant system ``ltv``.
:param signal: Vector representing input signal :math:`u`.
:param ltv: 2D array where each column represents an impulse response
:param mode: 'full', 'valid', or 'same'. See :func:`np.convolve` for an explanation of the options.
The convolution of two sequences is given by
.. math:: \mathbf{y} = \mathbf{t} \\star \mathbf{u}
This can be written as a matrix-vector multiplication
.. math:: \mathbf{y} = \mathbf{T} \\cdot \mathbf{u}
where :math:`T` is a Toeplitz matrix in which each column represents an impulse response.
In the case of a linear time-invariant (LTI) system, each column represents a time-shifted copy of the first column.
In the time-variant case (LTV), every column can contain a unique impulse response, both in values as in size.
This function assumes all impulse responses are of the same size.
The input matrix ``ltv`` thus represents the non-shifted version of the Toeplitz matrix.
.. seealso:: :func:`np.convolve`, :func:`scipy.signal.convolve` and :func:`scipy.signal.fftconvolve` for convolution with LTI system.
"""
assert(len(signal) == ltv.shape[1])
n = ltv.shape[0] + len(signal) - 1 # Length of output vector
un = np.concatenate((signal, np.zeros(ltv.shape[0] - 1))) # Resize input vector
offsets = np.arange(0, -ltv.shape[0], -1) # Offsets for impulse responses
Cs = spdiags(ltv, offsets, n, n) # Sparse representation of IR's.
out = Cs.dot(un) # Calculate dot product.
if mode=='full':
return out
elif mode=='same':
        start = ltv.shape[0] // 2 - 1 + ltv.shape[0] % 2
        stop = len(signal) + ltv.shape[0] // 2 - 1 + ltv.shape[0] % 2
return out[start:stop]
elif mode=='valid':
length = len(signal) - ltv.shape[0]
start = ltv.shape[0] - 1
stop = len(signal)
return out[start:stop]
def ir2fr(ir, fs, N=None):
"""
Convert impulse response into frequency response. Returns single-sided RMS spectrum.
    :param ir: Impulse response
    :param fs: Sample frequency
    :param N: Amount of FFT bins
Calculates the positive frequencies using :func:`np.fft.rfft`.
Corrections are then applied to obtain the single-sided spectrum.
.. note:: Single-sided spectrum. Therefore, the amount of bins returned is either N/2 or N/2+1.
"""
#ir = ir - np.mean(ir) # Remove DC component.
N = N if N else ir.shape[-1]
fr = rfft(ir, n=N) / N
f = np.fft.rfftfreq(N, 1.0/fs) #/ 2.0
fr *= 2.0
fr[..., 0] /= 2.0 # DC component should not be doubled.
    if not N%2:  # if N is even
fr[..., -1] /= 2.0 # And neither should fs/2 be.
#f = np.arange(0, N/2+1)*(fs/N)
return f, fr
def decibel_to_neper(decibel):
"""
Convert decibel to neper.
:param decibel: Value in decibel (dB).
:returns: Value in neper (Np).
The conversion is done according to
    .. math :: \\mathrm{Np} = \\frac{\\ln{10}}{20} \\mathrm{dB}
"""
return np.log(10.0) / 20.0 * decibel
def neper_to_decibel(neper):
"""
Convert neper to decibel.
:param neper: Value in neper (Np).
:returns: Value in decibel (dB).
The conversion is done according to
    .. math :: \\mathrm{dB} = \\frac{20}{\\ln{10}} \\mathrm{Np}
"""
return 20.0 / np.log(10.0) * neper
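# Illustrative sketch (comment only): the two conversions are inverses of each
# other, and 1 Np corresponds to 20 / ln(10), i.e. roughly 8.686 dB.
#
#     >>> neper_to_decibel(1.0)                       # ~ 8.6859
#     >>> decibel_to_neper(neper_to_decibel(0.5))     # ~ 0.5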
class Frequencies(object):
"""
Object describing frequency bands.
"""
def __init__(self, center, lower, upper, bandwidth=None):
self.center = np.asarray(center)
"""
Center frequencies.
"""
self.lower = np.asarray(lower)
"""
Lower frequencies.
"""
self.upper = np.asarray(upper)
"""
Upper frequencies.
"""
self.bandwidth = np.asarray(bandwidth) if bandwidth is not None else np.asarray(self.upper) - np.asarray(self.lower)
"""
Bandwidth.
"""
def __iter__(self):
for i in range(len(self.center)):
yield self[i]
def __len__(self):
return len(self.center)
def __str__(self):
return str(self.center)
def __repr__(self):
return "Frequencies({})".format(str(self.center))
def angular(self):
"""Angular center frequency in radians per second.
"""
return 2.0 * np.pi * self.center
class EqualBand(Frequencies):
"""
Equal bandwidth spectrum. Generally used for narrowband data.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, bandwidth=None):
"""
:param center: Vector of center frequencies.
:param fstart: First center frequency.
:param fstop: Last center frequency.
:param nbands: Amount of frequency bands.
:param bandwidth: Bandwidth of bands.
"""
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
nbands = 1
u = np.unique(np.diff(center).round(decimals=3))
n = len(u)
if n == 1:
bandwidth = u
elif n > 1:
raise ValueError("Given center frequencies are not equally spaced.")
else:
pass
fstart = center[0] #- bandwidth/2.0
fstop = center[-1] #+ bandwidth/2.0
elif fstart is not None and fstop is not None and nbands:
bandwidth = (fstop - fstart) / (nbands-1)
elif fstart is not None and fstop is not None and bandwidth:
nbands = round((fstop - fstart) / bandwidth) + 1
elif fstart is not None and bandwidth and nbands:
fstop = fstart + nbands * bandwidth
elif fstop is not None and bandwidth and nbands:
fstart = fstop - (nbands-1) * bandwidth
else:
raise ValueError("Insufficient parameters. Cannot determine fstart, fstop, bandwidth.")
center = fstart + np.arange(0, nbands) * bandwidth # + bandwidth/2.0
upper = fstart + np.arange(0, nbands) * bandwidth + bandwidth/2.0
lower = fstart + np.arange(0, nbands) * bandwidth - bandwidth/2.0
super(EqualBand, self).__init__(center, lower, upper, bandwidth)
def __getitem__(self, key):
return type(self)(center=self.center[key], bandwidth=self.bandwidth)
def __repr__(self):
return "EqualBand({})".format(str(self.center))
class OctaveBand(Frequencies):
"""Fractional-octave band spectrum.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, fraction=1, reference=acoustics.octave.REFERENCE):
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
center = np.asarray(center)
indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
elif fstart is not None and fstop is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstop+1)
elif fstart is not None and nbands is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstart+nbands)
elif fstop is not None and nbands is not None:
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstop-nbands, nstop)
else:
raise ValueError("Insufficient parameters. Cannot determine fstart and/or fstop.")
center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
lower = acoustics.octave.lower_frequency(center, fraction=fraction)
upper = acoustics.octave.upper_frequency(center, fraction=fraction)
bandwidth = upper - lower
nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
super(OctaveBand, self).__init__(center, lower, upper, bandwidth)
self.fraction = fraction
"""Fraction of fractional-octave filter.
"""
self.reference = reference
"""Reference center frequency.
"""
self.nominal = nominal
"""Nominal center frequencies.
"""
def __getitem__(self, key):
return type(self)(center=self.center[key], fraction=self.fraction, reference=self.reference)
def __repr__(self):
return "OctaveBand({})".format(str(self.center))
def ms(x):
"""Mean value of signal `x` squared.
:param x: Dynamic quantity.
:returns: Mean squared of `x`.
"""
return (np.abs(x)**2.0).mean()
def rms(x):
"""Root mean squared of signal `x`.
:param x: Dynamic quantity.
    .. math:: x_{rms} = \\lim_{T \\to \\infty} \\sqrt{\\frac{1}{T} \\int_0^T |x(t)|^2 \\mathrm{d} t }
:seealso: :func:`ms`.
"""
return np.sqrt(ms(x))
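# Illustrative sketch (comment only): the RMS value of a unit-amplitude sine
# sampled over a whole number of periods is 1/sqrt(2), i.e. roughly 0.707.
#
#     >>> t = np.arange(0.0, 1.0, 1.0/8000.0)
#     >>> rms(np.sin(2.0 * np.pi * 50.0 * t))     # ~ 0.7071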
def normalize(y, x=None):
    """Normalize the power in `y` to that of a (standard normal) white noise signal.
Optionally normalize to power in signal `x`.
#The mean power of a Gaussian with :math:`\\mu=0` and :math:`\\sigma=1` is 1.
"""
#return y * np.sqrt( (np.abs(x)**2.0).mean() / (np.abs(y)**2.0).mean() )
if x is not None:
x = ms(x)
else:
x = 1.0
return y * np.sqrt( x / ms(y) )
#return y * np.sqrt( 1.0 / (np.abs(y)**2.0).mean() )
## Broken? Caused correlation in auralizations....weird!
def window_scaling_factor(window, axis=-1):
"""
Calculate window scaling factor.
:param window: Window.
When analysing broadband (filtered noise) signals it is common to normalize
the windowed signal so that it has the same power as the un-windowed one.
.. math:: S = \\sqrt{\\frac{\\sum_{i=0}^N w_i^2}{N}}
"""
return np.sqrt((window*window).mean(axis=axis))
def apply_window(x, window):
"""
Apply window to signal.
:param x: Instantaneous signal :math:`x(t)`.
:param window: Vector representing window.
:returns: Signal with window applied to it.
    .. math:: x_s(t) = w(t) \\, x(t) / S
    where :math:`w(t)` is the window and :math:`S` is the window scaling factor.
.. seealso:: :func:`window_scaling_factor`.
"""
s = window_scaling_factor(window) # Determine window scaling factor.
n = len(window)
    windows = len(x) // n  # Amount of windows.
x = x[0:windows*n] # Truncate final part of signal that does not fit.
#x = x.reshape(-1, len(window)) # Reshape so we can apply window.
y = np.tile(window, windows)
return x * y / s
def amplitude_spectrum(x, fs, N=None):
"""
Amplitude spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
    The amplitude spectrum gives the amplitudes of the sinusoids the signal is built
    up from, and the RMS (root-mean-square) amplitudes can easily be found by dividing
    these amplitudes by :math:`\\sqrt{2}`.
The amplitude spectrum is double-sided.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / N
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr, axes=[-1])
def auto_spectrum(x, fs, N=None):
"""
Auto-spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The auto-spectrum contains the squared amplitudes of the signal. Squared amplitudes
are used when presenting data as it is a measure of the power/energy in the signal.
.. math:: S_{xx} (f_n) = \\overline{X (f_n)} \\cdot X (f_n)
The auto-spectrum is double-sided.
"""
f, a = amplitude_spectrum(x, fs, N=N)
return f, (a*a.conj()).real
def power_spectrum(x, fs, N=None):
"""
Power spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.
A power spectrum is a spectrum with squared RMS values. The power spectrum is
calculated from the autospectrum of the signal.
.. warning:: Does not include scaling to reference value!
.. seealso:: :func:`auto_spectrum`
"""
N = N if N else x.shape[-1]
f, a = auto_spectrum(x, fs, N=N)
a = a[..., N//2:]
f = f[..., N//2:]
a *= 2.0
a[..., 0] /= 2.0 # DC component should not be doubled.
    if not N%2:  # if N is even
a[..., -1] /= 2.0 # And neither should fs/2 be.
return f, a
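# Illustrative sketch (comment only): for a unit-amplitude sine that falls exactly
# on an FFT bin, the single-sided power spectrum has a single peak equal to the
# mean-square value of the sine, i.e. 0.5.
#
#     >>> fs = 8000
#     >>> t = np.arange(fs) / fs
#     >>> f, a = power_spectrum(np.sin(2.0 * np.pi * 1000.0 * t), fs)
#     >>> a.max()     # ~ 0.5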
def angle_spectrum(x, fs, N=None):
"""
Phase angle spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided wrapped phase angle spectrum.
.. seealso:: :func:`phase_spectrum` for unwrapped phase spectrum.
"""
N = N if N else x.shape[-1]
f, a = amplitude_spectrum(x, fs, N)
a = np.angle(a)
a = a[..., N//2:]
f = f[..., N//2:]
return f, a
def phase_spectrum(x, fs, N=None):
"""
Phase spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided unwrapped phase spectrum.
.. seealso:: :func:`angle_spectrum` for wrapped phase angle.
"""
    f, a = angle_spectrum(x, fs, N=N)
return f, np.unwrap(a)
#def power_and_phase_spectrum(x, fs, N=None):
#"""
#Power spectrum and phase of instantaneous signal :math:`x(t)`.
#:param x: Instantaneous signal :math:`x(t)`.
#:param fs: Sample frequency :math:`f_s`.
#:param N: Amount of FFT bins.
#Often one is interested in both the power spectrum and the phase. This function returns the power and a single-sided phase spectrum.
#For an explanation of the power spectrum, see :func:`power_spectrum`.
#"""
#returns f, power, phase
def density_spectrum(x, fs, N=None):
"""
Density spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
A density spectrum considers the amplitudes per unit frequency.
    Density spectra are used to compare spectra with different frequency resolutions,
    because expressing the magnitude per hertz makes it independent of the resolution.
    Amplitude spectra, on the other hand, depend on the chosen frequency resolution.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / fs
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr)
#def auto_density_spectrum(x, fs, N=None):
#"""
#Auto density spectrum of instantaneous signal :math:`x(t)`.
#"""
#f, d = density_spectrum(x, fs, N=N)
#return f, (d*d.conj()).real
#def power_density_spectrum(x, fs, N=None):
#"""
#Power density spectrum.
#"""
#N = N if N else x.shape[-1]
#f, a = auto_density_spectrum(x, fs, N=N)
#a = a[N//2:]
#f = f[N//2:]
#a *= 2.0
#a[..., 0] /= 2.0 # DC component should not be doubled.
#if not N%2: # if not uneven
#a[..., -1] /= 2.0 # And neither should fs/2 be.
#return f, a
def integrate_bands(data, a, b):
"""
Reduce frequency resolution of power spectrum. Merges frequency bands by integration.
:param data: Vector with narrowband powers.
:param a: Instance of :class:`Frequencies`.
:param b: Instance of :class:`Frequencies`.
.. note:: Needs rewriting so that the summation goes over axis=1.
"""
try:
if b.fraction%a.fraction:
            raise NotImplementedError("Non-integer ratios of fractional-octaves are not supported.")
except AttributeError:
pass
lower, _ = np.meshgrid(b.lower, a.center)
upper, _ = np.meshgrid(b.upper, a.center)
_, center= np.meshgrid(b.center, a.center)
return ((lower < center) * (center <= upper) * data[...,None]).sum(axis=-2)
def bandpass_frequencies(x, fs, frequencies, order=8, purge=False, zero_phase=False):
    """Apply bandpass filters for frequencies.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies. Instance of :class:`Frequencies`.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
"""
if purge:
frequencies = frequencies[frequencies.upper < fs/2.0]
return frequencies, np.array([bandpass(x, band.lower, band.upper, fs, order, zero_phase=zero_phase) for band in frequencies])
def bandpass_octaves(x, fs, frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
"""Apply 1/1-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
return bandpass_fractional_octaves(x, fs, frequencies, fraction=1, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_third_octaves(x, fs, frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
"""Apply 1/3-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
return bandpass_fractional_octaves(x, fs, frequencies, fraction=3, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_fractional_octaves(x, fs, frequencies, fraction=None, order=8, purge=False, zero_phase=False):
"""Apply 1/N-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies. Either instance of :class:`OctaveBand`, or array along with fs.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
if not isinstance(frequencies, Frequencies):
frequencies = OctaveBand(center=frequencies, fraction=fraction)
return bandpass_frequencies(x, fs, frequencies, order=order, purge=purge, zero_phase=zero_phase)
def third_octaves(p, fs, density=False,
frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/3-octave in frequency domain using the FFT.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.THIRD_OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=3)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level
def octaves(p, fs, density=False,
frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/1-octave in frequency domain using the FFT.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:param frequencies: Frequencies.
:param ref: Reference value.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=1)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level
def fractional_octaves(p, fs, start=5.0, stop=16000.0, fraction=3, density=False):
"""Calculate level per 1/N-octave in frequency domain using the FFT. N is `fraction`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. note:: This function does *not* use nominal center frequencies.
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(fstart=start, fstop=stop, fraction=fraction)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power)
return fob, level
class Filterbank(object):
"""
Fractional-Octave filter bank.
    .. warning:: At high sample frequencies the filter coefficients for the low-frequency bands are inaccurate. Therefore, to improve the response at lower frequencies the signal should first be downsampled. Currently, there is no easy way to do so within the Filterbank.
"""
def __init__(self, frequencies, sample_frequency=44100, order=8):
self.frequencies = frequencies
"""
Frequencies object.
See also :class:`Frequencies` and subclasses.
.. note:: A frequencies object should have the attributes center, lower and upper.
"""
self.order = order
"""
Filter order of Butterworth filter.
"""
self.sample_frequency = sample_frequency
"""
Sample frequency.
"""
@property
def sample_frequency(self):
"""
Sample frequency.
"""
return self._sample_frequency
@sample_frequency.setter
def sample_frequency(self, x):
#if x <= self.center_frequencies.max():
#raise ValueError("Sample frequency cannot be lower than the highest center frequency.")
self._sample_frequency = x
@property
def filters(self):
"""
Filters this filterbank consists of.
"""
fs = self.sample_frequency
return ( bandpass_filter(lower, upper, fs, order=self.order, output='ba') for lower, upper in zip(self.frequencies.lower, self.frequencies.upper) )
#order = self.order
#filters = list()
#nyq = self.sample_frequency / 2.0
#return ( butter(order, [lower/nyq, upper/nyq], btype='band', analog=False) for lower, upper in zip(self.frequencies.lower, self.frequencies.upper) )
def lfilter(self, signal):
"""
Filter signal with filterbank.
.. note:: This function uses :func:`scipy.signal.lfilter`.
"""
return ( lfilter(b, a, signal) for b, a in self.filters )
def filtfilt(self, signal):
"""
Filter signal with filterbank.
Returns a list consisting of a filtered signal per filter.
.. note:: This function uses :func:`scipy.signal.filtfilt` and therefore has a zero-phase response.
"""
return ( filtfilt(b, a, signal) for b, a in self.filters )
def power(self, signal):
"""
Power per band in signal.
"""
filtered = self.filtfilt(signal)
return np.array([(x**2.0).sum()/len(x) / bw for x, bw in zip(filtered, self.frequencies.bandwidth)])
def plot_response(self):
"""
Plot frequency response.
        .. note:: The phase response shown here is the one obtained when :meth:`lfilter` is used. The method :meth:`filtfilt` results in a zero-phase response.
"""
fs = self.sample_frequency
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
for f, fc in zip(self.filters, self.frequencies.center):
w, h = freqz(f[0], f[1], int(fs/2))#np.arange(fs/2.0))
ax1.semilogx(w / (2.0*np.pi) * fs, 20.0 * np.log10(np.abs(h)), label=str(int(fc)))
ax2.semilogx(w / (2.0*np.pi) * fs, np.angle(h), label=str(int(fc)))
ax1.set_xlabel(r'$f$ in Hz')
ax1.set_ylabel(r'$|H|$ in dB re. 1')
ax2.set_xlabel(r'$f$ in Hz')
ax2.set_ylabel(r'$\angle H$ in rad')
ax1.legend(loc=5)
ax2.legend(loc=5)
ax1.set_ylim(-60.0, +10.0)
return fig
def plot_power(self, signal):
"""
Plot power in signal.
"""
f = self.frequencies.center
p = self.power(signal)
fig = plt.figure()
ax = fig.add_subplot(111)
p = ax.bar(f, 20.0*np.log10(p))
ax.set_xlabel('$f$ in Hz')
ax.set_ylabel('$L$ in dB re. 1')
ax.set_xscale('log')
return fig
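# Illustrative usage sketch (comment only): an octave-band filterbank between
# 125 Hz and 4 kHz, applied to a hypothetical 1-D array `signal` sampled at
# 44.1 kHz.
#
#     >>> bands = OctaveBand(fstart=125.0, fstop=4000.0, fraction=1)
#     >>> fb = Filterbank(bands, sample_frequency=44100)
#     >>> band_powers = fb.power(signal)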
#class FilterbankFFT(object):
#"""
#Filterbank to filter signal using FFT.
#"""
#def __init__(self, frequencies, sample_frequency=44100):
#self.frequencies = frequencies
#"""
#Frequencies.
#See also :class:`Frequencies` and subclasses.
#"""
#self.sample_frequency = sample_frequency
#def power(self, signal):
#pass
#def plot_power(self, signal):
#pass
def isolate(signals):
"""Isolate signals.
:param signals: Array of shape N x M where N is the amount of samples and M the amount of signals. Thus, each column is a signal.
:returns: Array of isolated signals. Each column is a signal.
Isolate signals using Singular Value Decomposition.
"""
x = np.asarray(signals)
W, s, v = np.linalg.svd( (np.tile( (x*x).sum(axis=0), (len(x), 1) ) * x).dot(x.T) )
return v.T
def zero_crossings(data):
"""
Determine the positions of zero crossings in `data`.
:param data: Vector
:returns: Vector with indices of samples *before* the zero crossing.
"""
pos = data > 0
npos = ~pos
return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]
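# Illustrative sketch (comment only): for data = np.array([1.0, -1.0, 1.0]) the
# sign changes after samples 0 and 1, so zero_crossings(data) returns
# array([0, 1]).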
def amplitude_envelope(signal, fs):
"""Instantaneous amplitude of tone.
The instantaneous amplitude is the magnitude of the analytic signal.
.. seealso:: :func:`scipy.signal.hilbert`
"""
return np.abs(hilbert(signal))
def instantaneous_phase(signal, fs):
"""Instantaneous phase of tone.
The instantaneous phase is the angle of the analytic signal.
This function returns a wrapped angle.
.. seealso:: :func:`scipy.signal.hilbert`
"""
return np.angle(hilbert(signal))
def instantaneous_frequency(signal, fs):
"""Determine instantaneous frequency of tone.
The instantaneous frequency can be obtained by differentiating the unwrapped instantaneous phase.
.. seealso:: :func:`instantaneous_phase`
"""
return np.diff( np.unwrap(instantaneous_phase(signal, fs))) / (2.0*np.pi) * fs
def wvd(signal, fs, analytic=True):
"""Wigner-Ville Distribution
:param signal: Signal
:param fs: Sample frequency
:param analytic: Use the analytic signal, calculated using Hilbert transform.
.. math:: W_z(n, \\omega) = 2 \\sum_k z^*[n-k]z[n+k] e^{-j\\omega 2kT}
Includes positive and negative frequencies.
"""
signal = np.asarray(signal)
N = int(len(signal)+len(signal)%2)
length_FFT = N # Take an even value of N
if N != len(signal):
        signal = np.concatenate((signal, [0]))
length_time = len(signal)
if analytic:
signal = hilbert(signal)
s = np.concatenate((np.zeros(length_time), signal, np.zeros(length_time)))
W = np.zeros((length_FFT,length_time))
tau = np.arange(0, N//2)
    R = np.zeros((N, length_time), dtype='complex128')
i = length_time
for t in range(length_time):
        R[t, tau] = ( s[i+tau] * s[i-tau].conj() ) # In one direction
R[t, N-(tau+1)] = R[t, tau+1].conj() # And the other direction
i += 1
W = np.fft.fft(R, length_FFT) / (2*length_FFT)
f = np.fft.fftfreq(N, 1./fs)
return f, W.T
def _sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None):
"""Filtfilt version using Second Order sections. Code is taken from scipy.signal.filtfilt and adapted to make it work with SOS.
Note that broadcasting does not work.
"""
from scipy.signal import sosfilt_zi
    from scipy.signal._arraytools import odd_ext, even_ext, const_ext, axis_slice, axis_reverse
x = np.asarray(x)
if padlen is None:
edge = 0
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = sosfilt_zi(sos)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
#zi_shape = [1] * x.ndim
#zi_shape[axis] = zi.size
#zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
__all__ = ['bandpass',
'bandpass_frequencies',
'bandpass_fractional_octaves',
'bandpass_octaves',
'bandpass_third_octaves',
'lowpass',
'highpass',
'octavepass',
'octave_filter',
'bandpass_filter',
'convolve',
'ir2fr',
'decibel_to_neper',
'neper_to_decibel',
'EqualBand',
'OctaveBand',
'ms',
'rms',
'normalize',
'window_scaling_factor',
'apply_window',
'amplitude_spectrum',
'auto_spectrum',
'power_spectrum',
'angle_spectrum',
'phase_spectrum',
'density_spectrum',
'integrate_bands',
'octaves',
'third_octaves',
'fractional_octaves',
'Filterbank',
'isolate',
'zero_crossings',
'amplitude_envelope',
'instantaneous_phase',
'instantaneous_frequency',
'wvd',
]
| bsd-3-clause |
jenfly/atmos-tools | atmos/analysis.py | 1 | 18587 | """
Functions for atmospheric data analysis.
- Spectral analysis
- Timeseries
- Linear regression
"""
from __future__ import division
import numpy as np
import collections
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import atmos.utils as utils
import atmos.xrhelper as xr
# ======================================================================
# SPECTRAL ANALYSIS
# ======================================================================
class Fourier:
def __init__(self, y, dt=1.0, t=None, axis=0, time_units=None,
data_name=None):
"""Return Fourier transform of a timeseries.
Uses the numpy FFT function for real-valued inputs,
numpy.fft.rfft().
Parameters
----------
y : ndarray
N-D array of timeseries data.
dt : float, optional
Time spacing of data.
axis : int, optional
Time dimension to use for FFT.
t : ndarray
Array of times (e.g. datetimes for plotting timeseries).
time_units : str, optional
Time units.
data_name : str, optional
Name of timeseries data.
Returns
-------
self : Fourier object
The Fourier object has the following data attributes:
t, tseries : ndarray
Time values and data from input timeseries
f_k, tau_k : ndarray
Frequencies and periods in Fourier transform.
C_k : complex ndarray
Fourier coefficients.
ps_k : ndarray
Power spectral density at each frequency.
And it has the following methods:
smooth() : Smooth a timeseries with truncated FFT.
harmonic() : Return the k'th harmonic of the FFT.
Rsquared() : Return the Rsquared values of the FFT.
"""
# Make sure we're working with an ndarray and not a DataArray
if isinstance(y, xray.DataArray):
y = y.values
self.attrs = {'data_name' : data_name, 'time_units' : time_units,
'dt' : dt}
dims = y.shape
n = dims[axis]
if t is None:
t = dt * np.arange(n)
self.t = t
self.tseries = y
self.n = n
# Fourier frequencies and coefficients
self.f_k = np.fft.rfftfreq(n, dt)
self.C_k = np.fft.rfft(y, axis=axis)
self.axis = axis
# Periods corresponding to Fourier frequencies
self.tau_k = np.concatenate(([np.nan], 1/self.f_k[1:]))
# Power spectral density
ps_k = 2 * np.abs(self.C_k / n)**2
self.ps_k = ps_k
def __repr__(self):
def var_str(name, x):
width = 10
return ' %s %s\n' % (name.ljust(width),str(x.shape))
s = 'Attributes\n' + str(self.attrs)
s = s + '\n Axis: %d\n n: %d\n' % (self.axis, self.n)
s = s + '\nData\n'
s = s + (var_str('t', self.t) + var_str('tseries', self.tseries) +
var_str('f_k', self.f_k) + var_str('tau_k', self.tau_k) +
var_str('C_k', self.C_k) + var_str('ps_k', self.ps_k))
return s
def smooth(self, kmax):
"""Return a smooth timeseries from the FFT truncated at kmax."""
n = self.n
ax = self.axis
C_k = self.C_k
C_k = np.split(C_k, [kmax + 1], axis=ax)[0]
ysmooth = np.fft.irfft(C_k, n, axis=ax)
return ysmooth
def harmonic(self, k):
"""Return the k'th Fourier harmonic of the timeseries."""
if k == 0:
y = self.smooth(k)
else:
y = self.smooth(k) - self.smooth(k - 1)
return y
def Rsquared(self):
"""Return the coefficients of determination of the FFT.
The sum of the Rsq values up to and including the k-th Fourier
harmonic gives the amount of variance explained by those
harmonics.
"""
axis = self.axis
var = np.var(self.tseries, axis=axis)
ps_k = self.ps_k
# The k=0 harmonic (i.e. constant function) does not contribute
# to the variance in the timeseries.
if axis == 1:
var = np.expand_dims(var, axis)
ps_k[:, 0] = 0.0
elif axis ==0:
ps_k[0] = 0.0
else:
raise ValueError('Invalid axis ' + str(axis))
Rsq = ps_k / var
return Rsq
# ----------------------------------------------------------------------
def fourier_from_scratch(y, dt=1.0, ntrunc=None):
"""Calculate Fourier transform from scratch and smooth a timeseries.
Parameters
----------
y : ndarray
1-D array of timeseries data.
dt : float, optional
Time spacing of data.
ntrunc : int, optional
Maximum harmonics to include in smoothed output.
Returns
-------
f_k : ndarray
Frequencies in Fourier transform.
C_k : complex ndarray
Fourier coefficients.
ps_k : ndarray
Power spectral density at each frequency.
harmonics : dict of ndarrays
Timeseries of harmonics corresponding to each Fourier frequency.
ypred : ndarray
Timeseries of data predicted by Fourier coefficients, to
check that the reverse Fourier transform matches the input
timeseries.
ytrunc : ndarray
Smoothed timeseries of data predicted by the Fourier harmonics
truncated at maximum frequency f_k where k = ntrunc.
Reference
---------
Wilks, D. S. (2011). Statistical Methods in the Atmospheric Sciences.
International Geophysics (Vol. 100).
"""
n = len(y)
nhalf = n // 2
t = np.arange(1, n+1)
omega1 = 2 * np.pi / n
# Fourier transform and harmonics
# --------------------------------
# Calculate according to equations 9.62-9.65 in Wilks (2011) and
# rescale to be consistent with scaling in the output from
# numpy's FFT function.
Ak = np.zeros(nhalf + 1, dtype=float)
Bk = np.zeros(nhalf + 1, dtype=float)
Ak[0] = np.sum(y) / 2.0
Bk[0] = 0.0
harmonics = {0 : np.mean(y)}
for k in range(1, nhalf + 1):
omega = k * omega1
Ak[k] = np.sum(y * np.cos(omega*t))
Bk[k] = np.sum(y * np.sin(omega*t))
harmonics[k] = ((2.0/n) * Ak[k] * np.cos(omega*t) +
(2.0/n) * Bk[k] * np.sin(omega*t))
# Fourier coefficients
C_k = Ak + 1j * Bk
# Frequencies
f_k = np.arange(n//2 + 1) / float(n * dt)
# Power spectral density
ps_k = 2 * np.abs(C_k / n)**2
# Predicted y and smoothed truncated predicted y
ypred = np.zeros(n, dtype=float)
ytrunc = np.zeros(n, dtype=float)
for k in harmonics:
ypred += harmonics[k]
if ntrunc is not None and k <= ntrunc:
ytrunc += harmonics[k]
return f_k, C_k, ps_k, harmonics, ypred, ytrunc
# ----------------------------------------------------------------------
def fourier_smooth(data, kmax, axis=0):
"""Return data smoothed with Fourier series truncated at kmax.
Parameters
----------
data : ndarray
Data to smooth.
kmax : int
Maximum Fourier harmonic to include.
axis : int, optional
Dimension to compute Fourier transform and smoothing.
Returns
-------
data_out : ndarray
Smoothed data.
Rsq : ndarray
Coefficient of determination for the smoothed data,
i.e. fraction of total variance accounted for by the
smoothed data.
"""
ft = Fourier(data, axis=axis)
data_out = ft.smooth(kmax)
Rsq = ft.Rsquared()
Rsq = np.split(Rsq, [kmax + 1], axis=axis)[0]
Rsq = np.sum(Rsq, axis=axis)
return data_out, Rsq
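# Illustrative usage sketch (comment only): smooth a hypothetical 1-D array `y`
# of 365 daily values, keeping only the annual mean and the first two harmonics.
#
#     >>> ysmooth, Rsq = fourier_smooth(y, kmax=2)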
# ======================================================================
# LINEAR REGRESSION AND CORRELATIONS
# ======================================================================
class Linreg:
def __init__(self, x, y):
"""Return least-squares regression line.
See scipy.stats.linregress for details."""
# Convert any lists to arrays
if not isinstance(x, np.ndarray):
x = np.array(x)
if not isinstance(y, np.ndarray):
y = np.array(y)
ind = np.isfinite(x) & np.isfinite(y)
x, y = x[ind], y[ind]
reg = scipy.stats.linregress(x, y)
self.slope, self.intercept, self.r, self.p, self.stderr = reg
self.x, self.y = x, y
def __repr__(self):
s = 'Slope: ' + str(self.slope)
s = s + '\nIntercept: ' + str(self.intercept)
s = s + '\nCorrelation coefficient: ' + str(self.r)
s = s + '\np-value: ' + str(self.p)
s = s + '\nStandard error: ' + str(self.stderr)
return s
def predict(self, x):
"""Return predicted y values from linear regression."""
ypred = np.polyval([self.slope, self.intercept], x)
return ypred
def plot(self, scatter_clr='b', scatter_sym='o', scatter_size=5,
line_clr='r', line_width=1, annotation_pos='topleft',
pmax_bold=None):
plt.plot(self.x, self.y, color=scatter_clr, marker=scatter_sym,
markersize=scatter_size, linestyle='none')
ypred = self.predict(self.x)
plt.plot(self.x, ypred, color=line_clr, linewidth=line_width)
if annotation_pos is not None:
s = 'r = ' + utils.format_num(self.r) + '\n'
s = s + 'p = ' + utils.format_num(self.p) + '\n'
m = utils.format_num(self.slope)
y0 = utils.format_num(self.intercept, plus_sym=True)
s = s + 'y = %s x %s' % (m, y0)
if pmax_bold is not None and self.p < pmax_bold:
wt = 'bold'
else:
wt = 'normal'
utils.text(s, annotation_pos, fontweight=wt)
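# Illustrative usage sketch (comment only): fit a noisy straight line and read
# off the estimated slope, intercept and correlation coefficient.
#
#     >>> x = np.arange(100.0)
#     >>> y = 2.0 * x + 1.0 + np.random.randn(100)
#     >>> reg = Linreg(x, y)
#     >>> reg.slope, reg.intercept, reg.r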
# ----------------------------------------------------------------------
def regress_field(data, index, axis=-1):
"""Return the linear regression along an axis.
Parameters
----------
data : ndarray or xray.DataArray
Input data.
index : ndarray or xray.DataArray
Index values to regress against. Length must match length of
data along specified axis.
axis : int, optional
Axis to compute along.
Returns
-------
reg_data : xray.Dataset
Dataset containing correlation coefficients, slopes, and p-values.
"""
# Maximum number of dimensions handled by this code
nmax = 5
ndim = data.ndim
if ndim > 5:
raise ValueError('Input data has too many dimensions. Max 5-D.')
if isinstance(data, xray.DataArray):
name, attrs, coords, dimnames = xr.meta(data)
coords = utils.odict_delete(coords, dimnames[axis])
dimnames = list(dimnames)
dimnames.pop(axis)
vals = data.values
else:
vals = data
# Roll axis to end
vals = np.rollaxis(vals, axis, ndim)
# Add singleton dimensions for looping, if necessary
for i in range(ndim, nmax):
vals = np.expand_dims(vals, axis=0)
# Initialize output
dims = vals.shape[:-1]
reg = {}
reg['r'] = np.ones(dims, dtype=float)
reg['m'] = np.ones(dims, dtype=float)
reg['p'] = np.ones(dims, dtype=float)
# Compute regression, iterating over other dimensions
for i in range(dims[0]):
for j in range(dims[1]):
for k in range(dims[2]):
for m in range(dims[3]):
reg_sub = Linreg(index, vals[i,j,k,m])
                    reg['r'][i, j, k, m] = reg_sub.r
                    reg['m'][i, j, k, m] = reg_sub.slope
                    reg['p'][i, j, k, m] = reg_sub.p
# Collapse any additional dimensions that were added
n = reg['r'].ndim
for nm in reg:
for i in range(ndim - 1, n):
reg[nm] = reg[nm][0]
reg_data = xray.Dataset()
if isinstance(data, xray.DataArray):
reg_data['r'] = xray.DataArray(reg['r'] , name='r', coords=coords,
dims=dimnames)
else:
reg_data['r'] = xray.DataArray(reg['r'] , name='r')
coords, dimnames = reg_data['r'].coords, reg_data['r'].dims
for nm in ['m', 'p']:
reg_data[nm] = xray.DataArray(reg[nm] , name=nm, coords=coords,
dims=dimnames)
long_names = {'r' : 'correlation coefficient', 'm' : 'slope',
'p' : 'p-value'}
for nm in reg_data.data_vars:
reg_data[nm].attrs['long_name'] = long_names[nm]
return reg_data
# ----------------------------------------------------------------------
def corr_matrix(df, incl_index=False):
"""Return correlation coefficients and p-values between data pairs.
Parameters
----------
df : pandas.DataFrame
Input data.
incl_index : bool, optional
If True, include the index in the pairs of correlation calculations.
Returns
-------
corr : dict of DataFrames
Correlation coefficients (corr['r']) and p-values (corr['p']) between
each pair of columns in df.
"""
if incl_index:
df[df.index.name] = df.index
cols = df.columns
n = len(cols)
r = np.nan * np.ones((n, n))
p = np.nan * np.ones((n, n))
for i in range(n):
for j in range(i + 1):
r[i, j], p[i, j] = scipy.stats.pearsonr(df[cols[i]], df[cols[j]])
r[j, i], p[j, i] = r[i, j], p[i, j]
corr = {}
corr['r'] = pd.DataFrame(r, index=cols, columns=cols)
corr['p'] = pd.DataFrame(p, index=cols, columns=cols)
return corr
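# Illustrative sketch (comment only): two perfectly anti-correlated columns give
# an off-diagonal correlation coefficient of -1.
#
#     >>> df = pd.DataFrame({'a': np.arange(10.0), 'b': -np.arange(10.0)})
#     >>> corr_matrix(df)['r'].loc['a', 'b']     # -1.0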
# ----------------------------------------------------------------------
def scatter_matrix(data, corr_fmt='%.2f', annotation_pos=(0.05, 0.85),
figsize=(16,10), incl_p=False, incl_line=False,
suptitle=None, annotation_wt='bold', pmax_bold=None):
"""Matrix of scatter plots with correlation coefficients annotated.
Parameters
----------
data : pd.DataFrame
Data to plot.
corr_fmt : str, optional
Formatting for annotation with correlation coefficient.
annotation_pos : tuple, optional
x, y position for annotation (dimensionless units from 0-1).
figsize : tuple, optional
Figure size.
incl_p : bool, optional
If True, include p-value in annotation.
incl_line : bool, optional
If True, include linear regression line.
suptitle : str, optional
Supertitle to go above subplots.
annotation_wt : str, optional
Fontweight for annotation.
pmax_bold : float, optional
Make annotation bold for any correlations with p < pmax_bold,
normal weight otherwise. If None, then use annotation_wt
for all.
"""
if not corr_fmt.startswith('%'):
corr_fmt = '%' + corr_fmt
def annotation(r, p, m, y0, corr_fmt, incl_p, incl_line, pos, wt,
pmax_bold):
s = corr_fmt % r
if incl_p or incl_line:
s = 'r = ' + s + '\n'
if incl_p:
s = s + 'p = ' + utils.format_num(p) + '\n'
if incl_line:
m = utils.format_num(m)
y0 = utils.format_num(y0, plus_sym=True)
s = s + 'y = %s x %s' % (m, y0)
if pmax_bold is not None:
# Override font weight based on p-value
if p < pmax_bold:
wt = 'bold'
else:
wt = 'normal'
utils.text(s, pos, fontweight=wt, color='black')
# Matrix of scatter plots
ax = pd.scatter_matrix(data, figsize=figsize)
# Annotate with correlation coefficients and p-values
nrow, ncol = len(data.columns), len(data.columns)
iplot = 1
for i, col1 in enumerate(data.columns):
for j, col2 in enumerate(data.columns):
if i != j:
plt.subplot(nrow, ncol, iplot)
reg = Linreg(data[col2], data[col1])
annotation(reg.r, reg.p, reg.slope, reg.intercept, corr_fmt,
incl_p, incl_line, annotation_pos, annotation_wt,
pmax_bold)
if incl_line:
plt.plot(data[col2], reg.predict(data[col2]), 'k')
iplot += 1
plt.draw()
if suptitle is not None:
plt.suptitle(suptitle)
# ----------------------------------------------------------------------
def scatter_matrix_pairs(dfx, dfy, figsize=(12, 9), suptitle='',
fmts = {'scatter_clr' : 'k', 'scatter_sym' : '+',
'scatter_size' : 3, 'line_clr' : 'k',
'line_width' : 1,
'annotation_pos' : (0.05, 0.75),
'pmax_bold' : 0.05},
subplot_fmts = {'left' : 0.08, 'right' : 0.95,
'bottom' : 0.05, 'top' : 0.95,
'wspace' : 0, 'hspace' : 0}):
"""Matrix of scatter plots for a pair of dataframes.
"""
nrow = len(dfy.columns)
ncol = len(dfx.columns)
fig, axes = plt.subplots(nrow, ncol, sharex='col', sharey='row',
figsize=figsize)
plt.subplots_adjust(**subplot_fmts)
plt.suptitle(suptitle)
iplot = 1
for ykey in dfy.columns:
y = dfy[ykey]
for xkey in dfx.columns:
x = dfx[xkey]
reg = Linreg(x, y)
plt.subplot(nrow, ncol, iplot)
reg.plot(**fmts)
row, col = utils.subplot_index(nrow, ncol, iplot)
if row == nrow:
plt.xlabel(xkey)
if col == 1:
plt.ylabel(ykey)
else:
plt.gca().set_yticklabels([])
iplot += 1
# ----------------------------------------------------------------------
def detrend(df):
"""Return a pd.DataFrame or pd.Series with the data detrended."""
if isinstance(df, pd.Series):
series = True
df = df.to_frame()
else:
series = False
df_detrend = pd.DataFrame()
x = df.index.values
for col in df.columns:
y = df[col].values
reg = Linreg(x, y)
df_detrend[col] = df[col] - reg.predict(x)
if series:
df_detrend = df_detrend[df_detrend.columns[0]]
return df_detrend
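# Illustrative usage sketch (comment only): removing a linear trend from a series
# leaves only the residuals around the fitted line.
#
#     >>> s = pd.Series(3.0 * np.arange(50.0) + np.random.randn(50))
#     >>> resid = detrend(s)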
# ----------------------------------------------------------------------
# regress_field()
# time_detrend
# time_std, time_mean, etc.
| mit |
andim/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
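# Illustrative sketch (comment only): err_metric is a relative error that treats
# matching infinities as exact agreement, e.g. for a = [1.0, inf] and
# b = [1.1, inf] it returns roughly [0.09, 0.0].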
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
socrata/arcs | arcs/error_analysis.py | 1 | 5218 | import psycopg2
import pandas as pd
from launch_job import cleanup_description
from db import group_queries_and_judgments_query
# NB: this code was pulled from es_load.py:
# https://github.com/socrata/cetera-etl/blob/master/src/etl/es_load.py#L49-L63)
# It is subject to change. Ideally, we would have a shared module for these types of data
# contracts. But things have been changing quickly. Let's revisit when ETL is more stable.
DATATYPE_MAPPING = {
"datasets": ("dataset", ""),
"datalenses": ("datalens", ""),
"calendars": ("calendar", ""),
"charts": ("chart", ""),
"datalens_charts": ("chart", "datalens"),
"files": ("file", ""),
"forms": ("form", ""),
"filters": ("filter", ""),
"hrefs": ("href", ""),
"geo_maps": ("map", "geo"),
"tabular_maps": ("map", "tabular"),
"datalens_maps": ("map", "datalens"),
"pulses": ("pulse", ""),
"stories": ("story", "")
}
OUTPUT_COLUMNS = ("query", "result_fxf", "result_position",
"name", "description", "url", "judgment")
def get_irrelevant_qrps(results):
"""
Get a list of query-result pairs from the raw judgments where more than one
worker gave the result a relevance score of 0.
"""
error = "irrelevant"
return [(d["query"], d["name"], d["link"], error, d["relevance"]["res"].count("0"))
for d in results.itervalues() if d["relevance"]["res"].count("0") > 1]
def _get_max_source_tag(db_conn):
with db_conn.cursor() as cur:
cur.execute("SELECT MAX(source_tag) FROM cetera_core_datatypes_snapshot")
return cur.fetchone()[0]
def get_fxf_metadata_mapping(db_conn):
"""
Get a dict mapping FXF to useful metadata about a dataset.
Args:
db_conn (psycopg2.extensions.connection): Connection to a database
instance
Returns:
A dict of FXFs to dataset metadata
"""
query = "SELECT nbe_fxf, datatype, domain_cname, unit_name AS name, " \
"unit_desc AS description " \
"FROM cetera_core_datatypes_snapshot WHERE source_tag = %s"
with db_conn.cursor() as cur:
source_tag = _get_max_source_tag(db_conn)
cur.execute(query, (source_tag,))
return {nbe_fxf: {
"datatype": datatype,
"domain_cname": domain_cname,
"name": name,
"description": description
} for nbe_fxf, datatype, domain_cname, name, description in cur}
def get_dataset_url(domain, fxf, datatype):
"""
Generate a permalink from the dataset's domain, fxf, and datatype.
Args:
domain (str): The domain of the dataset
fxf (str): The fxf of the dataset
datatype (tuple): The (view_type, display_type) of the dataset
Returns:
A dataset URL
"""
dtype, vtype = DATATYPE_MAPPING.get(datatype, (None, None))
if dtype == "story":
url = "https://{}/stories/s/{}".format(domain, fxf)
elif dtype == "datalens" or vtype == "datalens":
url = "https://{}/view/{}".format(domain, fxf)
else:
url = "https://{}/d/{}".format(domain, fxf)
return url
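# Illustrative sketch with a hypothetical domain and fxf: a result of datatype
# "stories" maps to the /stories/s/ permalink form, e.g.
# get_dataset_url("data.example.gov", "abcd-1234", "stories") returns
# "https://data.example.gov/stories/s/abcd-1234".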
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
        description='Gather data from CrowdFlower judgments to serve as '
'the basis for error analysis')
parser.add_argument('group_id', type=int,
help="The group whose judgments are the basis for analysis")
parser.add_argument('-o', '--outfile', dest='outfile', type=str, required=True,
help='Name of CSV file to which data will be written.')
parser.add_argument('-D', '--db_conn_str', required=True, help='Database connection string')
args = parser.parse_args()
db_conn = psycopg2.connect(args.db_conn_str)
print("Reading metadata")
fxf_metadata_dict = get_fxf_metadata_mapping(db_conn)
print("Reading all judged data for group")
data_df = pd.read_sql(
group_queries_and_judgments_query(db_conn, args.group_id, "domain_catalog"),
db_conn)
print("Counting irrelevants")
data_df["num_irrelevants"] = data_df["raw_judgments"].apply(
lambda js: sum([1 for j in js if "judgment" in j and j["judgment"] < 1]))
data_df = data_df[data_df["num_irrelevants"] >= 2]
print("Adding metadata to dataframe")
data_df["metadata"] = data_df["result_fxf"].apply(
lambda fxf: fxf_metadata_dict.get(fxf, {}))
print("Extracting dataset names")
data_df["name"] = data_df["metadata"].apply(
lambda metadata: metadata.get("name"))
print("Extracting and cleaning descriptions")
data_df["description"] = data_df["metadata"].apply(
lambda metadata: cleanup_description(metadata.get("description", "").decode("utf-8")))
print("Extracting URLs")
data_df["url"] = data_df.apply(
lambda row: get_dataset_url(
row["metadata"].get("domain_cname"),
row["result_fxf"],
row["metadata"].get("datatype")), axis=1)
data_df.sort_values("judgment", inplace=True)
outfile = args.outfile or "errors.csv"
data_df.to_csv(outfile, encoding="utf-8", index=False, columns=OUTPUT_COLUMNS)
| mit |
jseabold/statsmodels | statsmodels/tsa/statespace/dynamic_factor_mq.py | 1 | 186145 | # -*- coding: utf-8 -*-
"""
Dynamic factor model.
Author: Chad Fulton
License: BSD-3
"""
from collections import OrderedDict
from warnings import warn
import numpy as np
import pandas as pd
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.validation import int_like
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.multivariate.pca import PCA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace._quarterly_ar1 import QuarterlyAR1
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.validation import string_like
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tsa.statespace import mlemodel, initialization
from statsmodels.tsa.statespace.tools import (
companion_matrix, is_invertible, constrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_univariate,
unconstrain_stationary_multivariate)
from statsmodels.tsa.statespace.kalman_smoother import (
SMOOTHER_STATE, SMOOTHER_STATE_COV, SMOOTHER_STATE_AUTOCOV)
from statsmodels.base.data import PandasData
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.tableformatting import fmt_params
class FactorBlock(dict):
"""
Helper class for describing and indexing a block of factors.
Parameters
----------
factor_names : tuple of str
Tuple of factor names in the block (in the order that they will appear
in the state vector).
factor_order : int
Order of the vector autoregression governing the factor block dynamics.
endog_factor_map : pd.DataFrame
Mapping from endog variable names to factor names.
state_offset : int
Offset of this factor block in the state vector.
has_endog_Q : bool
Flag if the model contains quarterly data.
Notes
-----
The goal of this class is, in particular, to make it easier to retrieve
indexes of subsets of the state vector that are associated with a
particular block of factors.
- `factors_ix` is a matrix of indices, with rows corresponding to factors
in the block and columns corresponding to lags
- `factors` is vec(factors_ix) (i.e. it stacks columns, so that it is
`factors_ix.ravel(order='F')`). Thinking about a VAR system, the first
k*p elements correspond to the equation for the first variable. The next
k*p elements correspond to the equation for the second variable, and so
on. It contains all of the lags in the state vector, which is max(5, p)
- `factors_ar` is the subset of `factors` that have nonzero coefficients,
so it contains lags up to p.
- `factors_L1` only contains the first lag of the factors
- `factors_L1_5` contains the first - fifth lags of the factors
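As a concrete illustration (a hypothetical block, not taken from a fitted
model): for a block with two factors, `factor_order=1`, `state_offset=0`, and
quarterly data present (so the stored lag length is max(5, 1) = 5), the state
elements are ordered (f0.L1, f1.L1, f0.L2, f1.L2, ..., f0.L5, f1.L5). In that
case `factors` is the slice 0:10, `factors_ar` and `factors_L1` are both the
slice 0:2, `factors_L1_5` is the slice 0:10, and `factors_ix` is the array
[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]].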
"""
def __init__(self, factor_names, factor_order, endog_factor_map,
state_offset, k_endog_Q):
self.factor_names = factor_names
self.k_factors = len(self.factor_names)
self.factor_order = factor_order
self.endog_factor_map = endog_factor_map.loc[:, factor_names]
self.state_offset = state_offset
self.k_endog_Q = k_endog_Q
if self.k_endog_Q > 0:
self._factor_order = max(5, self.factor_order)
else:
self._factor_order = self.factor_order
self.k_states = self.k_factors * self._factor_order
# Save items
self['factors'] = self.factors
self['factors_ar'] = self.factors_ar
self['factors_ix'] = self.factors_ix
self['factors_L1'] = self.factors_L1
self['factors_L1_5'] = self.factors_L1_5
@property
def factors_ix(self):
"""Factor state index array, shaped (k_factors, lags)."""
# i.e. the position in the state vector of the second lag of the third
# factor is factors_ix[2, 1]
# ravel(order='F') gives e.g (f0.L1, f1.L1, f0.L2, f1.L2, f0.L3, ...)
# while
# ravel(order='C') gives e.g (f0.L1, f0.L2, f0.L3, f1.L1, f1.L2, ...)
o = self.state_offset
return np.reshape(o + np.arange(self.k_factors * self._factor_order),
(self._factor_order, self.k_factors)).T
@property
def factors(self):
"""Factors and all lags in the state vector (max(5, p))."""
# Note that this is equivalent to factors_ix with ravel(order='F')
o = self.state_offset
return np.s_[o:o + self.k_factors * self._factor_order]
@property
def factors_ar(self):
"""Factors and all lags used in the factor autoregression (p)."""
o = self.state_offset
return np.s_[o:o + self.k_factors * self.factor_order]
@property
def factors_L1(self):
"""Factors (first block / lag only)."""
o = self.state_offset
return np.s_[o:o + self.k_factors]
@property
def factors_L1_5(self):
"""Factors plus four lags."""
o = self.state_offset
return np.s_[o:o + self.k_factors * 5]
class DynamicFactorMQStates(dict):
"""
Helper class for describing and indexing the state vector.
Parameters
----------
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
endog_names : list
Names of the endogenous variables.
factors : int, list, or dict
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, ....
factor_orders : int or dict
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
Attributes
----------
k_endog : int
Total number of endogenous variables.
k_states : int
Total number of state variables (those associated with the factors and
those associated with the idiosyncratic disturbances).
k_posdef : int
Total number of state disturbance terms (those associated with the
factors and those associated with the idiosyncratic disturbances).
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
k_factors : int
Total number of factors. Note that factor multiplicities will have
already been expanded.
k_states_factors : int
The number of state variables associated with factors (includes both
factors and lags of factors included in the state vector).
k_posdef_factors : int
The number of state disturbance terms associated with factors.
k_states_idio : int
Total number of state variables associated with idiosyncratic
disturbances.
k_posdef_idio : int
Total number of state disturbance terms associated with idiosyncratic
disturbances.
k_states_idio_M : int
The number of state variables associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_states_idio_Q : int
The number of state variables associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q * 5`, even if the disturbances are not AR(1).
k_posdef_idio_M : int
The number of state disturbance terms associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_posdef_idio_Q : int
The number of state disturbance terms associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q`, even if the disturbances are not AR(1).
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process.
factor_blocks : list of FactorBlock
List of `FactorBlock` helper instances for each factor block.
factor_names : list of str
List of factor names.
factors : dict
Dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
Note that factor multiplicities will have already been expanded.
factor_orders : dict
Dictionary with:
- keys : tuple of factor names
- values : integer describing autoregression order
Note that factor multiplicities will have already been expanded.
max_factor_order : int
Maximum autoregression order across all factor blocks.
factor_block_orders : pd.Series
Series containing lag orders, with the factor block (a tuple of factor
names) as the index.
factor_multiplicities : dict
Dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
endog_factor_map : dict
Dictionary with:
- keys : endog name
- values : list of factor names
loading_counts : pd.Series
Series containing number of endogenous variables loading on each
factor, with the factor name as the index.
block_loading_counts : dict
Dictionary with:
- keys : tuple of factor names
- values : average number of endogenous variables loading on the block
(note that average is over the factors in the block)
Notes
-----
The goal of this class is, in particular, to make it easier to retrieve
indexes of subsets of the state vector.
Note that the ordering of the factor blocks in the state vector is
determined by the `factor_orders` argument if a dictionary. Otherwise,
factors are ordered so that factors that load on more variables come first
(and then alphabetically, to break ties).
- `factors_L1` is an array with the indexes of first lag of the factors
from each block. Ordered first by block, and then by lag.
- `factors_L1_5` is an array with the indexes contains the first - fifth
lags of the factors from each block. Ordered first by block, and then by
lag.
- `factors_L1_5_ix` is an array shaped (5, k_factors) with the indexes
of the first - fifth lags of the factors from each block.
- `idio_ar_L1` is an array with the indexes of the first lag of the
idiosyncratic AR states, both monthly (if applicable) and quarterly.
- `idio_ar_M` is a slice with the indexes of the idiosyncratic disturbance
states for the monthly (or non-time-specific if there are no quarterly
variables) variables. It is an empty slice if
`idiosyncratic_ar1 = False`.
- `idio_ar_Q` is a slice with the indexes of the idiosyncratic disturbance
states and all lags, for the quarterly variables. It is an empty slice if
there are no quarterly variables.
- `idio_ar_Q_ix` is an array shaped (k_endog_Q, 5) with the indexes of the
first - fifth lags of the idiosyncratic disturbance states for the
quarterly variables.
- `endog_factor_iloc` is a list of lists, with entries for each endogenous
variable. The entry for variable `i`, `endog_factor_iloc[i]` is a list of
indexes of the factors that variable `i` loads on. This does not include
any lags, but it can be used with e.g. `factors_L1_5_ix` to get lags.
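As a concrete illustration (hypothetical dimensions, not from a fitted model):
with k_endog_M=2, k_endog_Q=1, a single factor with lag order one, and
idiosyncratic_ar1=True, the factor block occupies states 0-4 (the factor and
four additional lags, since quarterly data require five), the monthly
idiosyncratic AR states occupy states 5-6, and the quarterly idiosyncratic
states occupy states 7-11. In that case `factors_L1` is [0],
`factors_L1_5_ix` is [[0], [1], [2], [3], [4]], `idio_ar_L1` is the slice 5:8,
`idio_ar_M` is the slice 5:7, `idio_ar_Q` is the slice 7:12, and
`idio_ar_Q_ix` is [[7, 8, 9, 10, 11]].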
"""
def __init__(self, k_endog_M, k_endog_Q, endog_names, factors,
factor_orders, factor_multiplicities, idiosyncratic_ar1):
# Save model parameterization
self.k_endog_M = k_endog_M
self.k_endog_Q = k_endog_Q
self.k_endog = self.k_endog_M + self.k_endog_Q
self.idiosyncratic_ar1 = idiosyncratic_ar1
# Validate factor-related inputs
factors_is_int = np.issubdtype(type(factors), np.integer)
factors_is_list = isinstance(factors, (list, tuple))
orders_is_int = np.issubdtype(type(factor_orders), np.integer)
if factor_multiplicities is None:
factor_multiplicities = 1
mult_is_int = np.issubdtype(type(factor_multiplicities), np.integer)
if not (factors_is_int or factors_is_list or
isinstance(factors, dict)):
raise ValueError('`factors` argument must be an integer number of'
' factors, a list of global factor names, or a'
' dictionary, mapping observed variables to'
' factors.')
if not (orders_is_int or isinstance(factor_orders, dict)):
raise ValueError('`factor_orders` argument must either be an'
' integer or a dictionary.')
if not (mult_is_int or isinstance(factor_multiplicities, dict)):
raise ValueError('`factor_multiplicities` argument must either be'
' an integer or a dictionary.')
# Expand integers
# If `factors` is an integer, we assume that it denotes the number of
# global factors (factors that load on each variable)
if factors_is_int or factors_is_list:
# Validate this here for a more informative error message
if ((factors_is_int and factors == 0) or
(factors_is_list and len(factors) == 0)):
raise ValueError('The model must contain at least one factor.')
if factors_is_list:
factor_names = list(factors)
else:
factor_names = [f'{i}' for i in range(factors)]
factors = {name: factor_names[:] for name in endog_names}
factor_names = set(np.concatenate(list(factors.values())))
if orders_is_int:
factor_orders = {factor_name: factor_orders
for factor_name in factor_names}
if mult_is_int:
factor_multiplicities = {factor_name: factor_multiplicities
for factor_name in factor_names}
# Apply the factor multiplicities
factors, factor_orders = self._apply_factor_multiplicities(
factors, factor_orders, factor_multiplicities)
# Save the (potentially expanded) variables
self.factors = factors
self.factor_orders = factor_orders
self.factor_multiplicities = factor_multiplicities
# Get the mapping between endog and factors
self.endog_factor_map = self._construct_endog_factor_map(
factors, endog_names)
self.k_factors = self.endog_factor_map.shape[1]
# Validate number of factors
# TODO: could do more extensive validation here.
if self.k_factors > self.k_endog_M:
raise ValueError(f'Number of factors ({self.k_factors}) cannot be'
' greater than the number of monthly endogenous'
f' variables ({self.k_endog_M}).')
# Get `loading_counts`: factor -> # endog loading on the factor
self.loading_counts = (
self.endog_factor_map.sum(axis=0).rename('count')
.reset_index().sort_values(['count', 'factor'],
ascending=[False, True])
.set_index('factor'))
# `block_loading_counts`: block -> average of (# loading on factor)
# across each factor in the block
block_loading_counts = {
block: np.atleast_1d(
self.loading_counts.loc[list(block), 'count']).mean(axis=0)
for block in factor_orders.keys()}
ix = pd.Index(block_loading_counts.keys(), tupleize_cols=False,
name='block')
self.block_loading_counts = pd.Series(
list(block_loading_counts.values()),
index=ix, name='count').to_frame().sort_values(
['count', 'block'], ascending=[False, True])['count']
# Get the mapping between factor blocks and VAR order
# `factor_block_orders`: pd.Series of factor block -> lag order
ix = pd.Index(factor_orders.keys(), tupleize_cols=False, name='block')
self.factor_block_orders = pd.Series(
list(factor_orders.values()), index=ix, name='order')
# If the `factor_orders` variable was an integer, then it did not
# define an ordering for the factor blocks. In this case, we use the
# loading counts to do so. This ensures that e.g. global factors are
# listed first.
if orders_is_int:
keys = self.block_loading_counts.keys()
self.factor_block_orders = self.factor_block_orders.loc[keys]
self.factor_block_orders.index.name = 'block'
# Define factor_names based on factor_block_orders (instead of on those
# from `endog_factor_map`) to (a) make sure that factors are allocated
# to only one block, and (b) order the factor names to be consistent
# with the block definitions.
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
missing = [name for name in self.endog_factor_map.columns
if name not in factor_names.tolist()]
if len(missing):
ix = pd.Index([(factor_name,) for factor_name in missing],
tupleize_cols=False, name='block')
default_block_orders = pd.Series(np.ones(len(ix), dtype=int),
index=ix, name='order')
self.factor_block_orders = (
self.factor_block_orders.append(default_block_orders))
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
duplicates = factor_names.duplicated()
if duplicates.any():
duplicate_names = set(factor_names[duplicates])
raise ValueError('Each factor can be assigned to at most one'
' block of factors in `factor_orders`.'
f' Duplicate entries for {duplicate_names}')
self.factor_names = factor_names.tolist()
self.max_factor_order = np.max(self.factor_block_orders)
# Re-order the columns of the endog factor mapping to reflect the
# orderings of endog_names and factor_names
self.endog_factor_map = (
self.endog_factor_map.loc[endog_names, factor_names])
# Create factor block helpers, and get factor-related state and posdef
# dimensions
self.k_states_factors = 0
self.k_posdef_factors = 0
state_offset = 0
self.factor_blocks = []
for factor_names, factor_order in self.factor_block_orders.items():
block = FactorBlock(factor_names, factor_order,
self.endog_factor_map, state_offset,
self.k_endog_Q)
self.k_states_factors += block.k_states
self.k_posdef_factors += block.k_factors
state_offset += block.k_states
self.factor_blocks.append(block)
# Idiosyncratic state dimensions
self.k_states_idio_M = self.k_endog_M if idiosyncratic_ar1 else 0
self.k_states_idio_Q = self.k_endog_Q * 5
self.k_states_idio = self.k_states_idio_M + self.k_states_idio_Q
# Idiosyncratic posdef dimensions
self.k_posdef_idio_M = self.k_endog_M if self.idiosyncratic_ar1 else 0
self.k_posdef_idio_Q = self.k_endog_Q
self.k_posdef_idio = self.k_posdef_idio_M + self.k_posdef_idio_Q
# Total states, posdef
self.k_states = self.k_states_factors + self.k_states_idio
self.k_posdef = self.k_posdef_factors + self.k_posdef_idio
# Cache
self._endog_factor_iloc = None
def _apply_factor_multiplicities(self, factors, factor_orders,
factor_multiplicities):
"""
Expand `factors` and `factor_orders` to account for factor multiplicity.
For example, if there is a `global` factor with multiplicity 2, then
this method expands that into `global.1` and `global.2` in both the
`factors` and `factor_orders` dictionaries.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
factor_orders : dict
Dictionary of {tuple of factor names: factor order}
factor_multiplicities : dict
Dictionary of {factor name: factor multiplicity}
Returns
-------
new_factors : dict
Dictionary of {endog_name: list of factor names}, with factor names
expanded to incorporate multiplicities.
new_factor_orders : dict
Dictionary of {tuple of factor names: factor order}, with factor
names in each tuple expanded to incorporate multiplicities.
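For example (hypothetical inputs): with
`factors = {'y1': ['global'], 'y2': ['global']}`,
`factor_orders = {'global': 1}`, and `factor_multiplicities = {'global': 2}`,
the expanded outputs are
`new_factors = {'y1': ['global.1', 'global.2'], 'y2': ['global.1', 'global.2']}`
and `new_factor_orders = {('global.1', 'global.2'): 1}`.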
"""
# Expand the factors to account for the multiplicities
new_factors = {}
for endog_name, factors_list in factors.items():
new_factor_list = []
for factor_name in factors_list:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_factor_list += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_factor_list.append(factor_name)
new_factors[endog_name] = new_factor_list
# Expand the factor orders to account for the multiplicities
new_factor_orders = {}
for block, factor_order in factor_orders.items():
if not isinstance(block, tuple):
block = (block,)
new_block = []
for factor_name in block:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_block += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_block += [factor_name]
new_factor_orders[tuple(new_block)] = factor_order
return new_factors, new_factor_orders
def _construct_endog_factor_map(self, factors, endog_names):
"""
Construct mapping of observed variables to factors.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
endog_names : list of str
List of the names of the observed variables.
Returns
-------
endog_factor_map : pd.DataFrame
Boolean dataframe with `endog_names` as the index and the factor
names (computed from the `factors` input) as the columns. Each cell
is True if the associated factor is allowed to load on the
associated observed variable.
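For example (hypothetical inputs): with
`factors = {'y1': ['global'], 'y2': ['global', 'real']}` and
`endog_names = ['y1', 'y2']`, the returned DataFrame has index
['y1', 'y2'], columns ['global', 'real'], and values
[[True, False], [True, True]].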
"""
# Validate that all entries in the factors dictionary have associated
# factors
missing = []
for key, value in factors.items():
if not isinstance(value, (list, tuple)) or len(value) == 0:
missing.append(key)
if len(missing):
raise ValueError('Each observed variable must be mapped to at'
' least one factor in the `factors` dictionary.'
f' Variables missing factors are: {missing}.')
# Validate that we have been told about the factors for each endog
# variable. This is because it doesn't make sense to include an
# observed variable that doesn't load on any factor
missing = set(endog_names).difference(set(factors.keys()))
if len(missing):
raise ValueError('If a `factors` dictionary is provided, then'
' it must include entries for each observed'
f' variable. Missing variables are: {missing}.')
# Figure out the set of factor names
# (0 is just a dummy value for the dict - we just do it this way to
# collect the keys, in order, without duplicates.)
factor_names = {}
for key, value in factors.items():
if isinstance(value, str):
factor_names[value] = 0
else:
factor_names.update({v: 0 for v in value})
factor_names = list(factor_names.keys())
k_factors = len(factor_names)
endog_factor_map = pd.DataFrame(
np.zeros((self.k_endog, k_factors), dtype=bool),
index=pd.Index(endog_names, name='endog'),
columns=pd.Index(factor_names, name='factor'))
for key, value in factors.items():
endog_factor_map.loc[key, value] = True
return endog_factor_map
@property
def factors_L1(self):
"""Factors."""
ix = np.arange(self.k_states_factors)
iloc = tuple(ix[block.factors_L1] for block in self.factor_blocks)
return np.concatenate(iloc)
@property
def factors_L1_5_ix(self):
"""Factors plus any lags, index shaped (5, k_factors)."""
ix = np.arange(self.k_states_factors)
iloc = []
for block in self.factor_blocks:
iloc.append(ix[block.factors_L1_5].reshape(5, block.k_factors))
return np.concatenate(iloc, axis=1)
@property
def idio_ar_L1(self):
"""Idiosyncratic AR states, (first block / lag only)."""
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix2 = ix1 + self.k_endog
else:
ix2 = ix1 + self.k_endog_Q
return np.s_[ix1:ix2]
@property
def idio_ar_M(self):
"""Idiosyncratic AR states for monthly variables."""
ix1 = self.k_states_factors
ix2 = ix1
if self.idiosyncratic_ar1:
ix2 += self.k_endog_M
return np.s_[ix1:ix2]
@property
def idio_ar_Q(self):
"""Idiosyncratic AR states and all lags for quarterly variables."""
# Note that this is equivalent to idio_ar_Q_ix with ravel(order='F')
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix1 += self.k_endog_M
ix2 = ix1 + self.k_endog_Q * 5
return np.s_[ix1:ix2]
@property
def idio_ar_Q_ix(self):
"""Idiosyncratic AR (quarterly) state index, (k_endog_Q, lags)."""
# i.e. the position in the state vector of the second lag of the third
# quarterly variable is idio_ar_Q_ix[2, 1]
# ravel(order='F') gives e.g. (y1.L1, y2.L1, y1.L2, y2.L2, y1.L3, ...)
# while
# ravel(order='C') gives e.g (y1.L1, y1.L2, y1.L3, y2.L1, y2.L2, ...)
start = self.k_states_factors
if self.idiosyncratic_ar1:
start += self.k_endog_M
return (start + np.reshape(
np.arange(5 * self.k_endog_Q), (5, self.k_endog_Q)).T)
@property
def endog_factor_iloc(self):
"""List of list of int, factor indexes for each observed variable."""
# i.e. endog_factor_iloc[i] is a list of integer locations of the
# factors that load on the ith observed variable
if self._endog_factor_iloc is None:
ilocs = []
for i in range(self.k_endog):
ilocs.append(np.where(self.endog_factor_map.iloc[i])[0])
self._endog_factor_iloc = ilocs
return self._endog_factor_iloc
def __getitem__(self, key):
"""
Use square brackets to access index / slice elements.
This is convenient in highlighting the indexing / slice quality of
these attributes in the code below.
"""
if key in ['factors_L1', 'factors_L1_5_ix', 'idio_ar_L1', 'idio_ar_M',
'idio_ar_Q', 'idio_ar_Q_ix']:
return getattr(self, key)
else:
raise KeyError(key)
class DynamicFactorMQ(mlemodel.MLEModel):
r"""
Dynamic factor model with EM algorithm; option for monthly/quarterly data.
Implementation of the dynamic factor model of Bańbura and Modugno (2014)
([1]_) and Bańbura, Giannone, and Reichlin (2011) ([2]_). Uses the EM
algorithm for parameter fitting, and so can accommodate a large number of
left-hand-side variables. Specifications can include any collection of
blocks of factors, including different factor autoregression orders, and
can include AR(1) processes for idiosyncratic disturbances. Can
incorporate monthly/quarterly mixed frequency data along the lines of
Mariano and Murasawa (2010) ([4]_). A special case of this model is the
Nowcasting model of Bok et al. (2018) ([3]_). Moreover, this model can be
used to compute the news associated with updated data releases.
Parameters
----------
endog : array_like
Observed time-series process :math:`y`. See the "Notes" section for
details on how to set up a model with monthly/quarterly mixed frequency
data.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which the
provided `endog` dataset contains both the monthly and quarterly data,
this variable should be used to indicate how many of the variables
are monthly. Note that when using the `k_endog_monthly` argument, the
columns with monthly variables in `endog` should be ordered first, and
the columns with quarterly variables should come afterwards. See the
"Notes" section for details on how to set up a model with
monthly/quarterly mixed frequency data.
factors : int, list, or dict, optional
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, .... The
default is a single factor that loads on all variables. Note that there
cannot be more factors specified than there are monthly variables.
factor_orders : int or dict, optional
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict, optional
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
standardize : bool or tuple, optional
If a boolean, whether or not to standardize each endogenous variable to
have mean zero and standard deviation 1 before fitting the model. See
"Notes" for details about how this option works with postestimation
output. If a tuple (usually only used internally), then the tuple must
have length 2, with each element containing a Pandas series with index
equal to the names of the endogenous variables. The first element
should contain the mean values and the second element should contain
the standard deviations. Default is True.
endog_quarterly : pandas.Series or pandas.DataFrame
Observed quarterly variables. If provided, must be a Pandas Series or
DataFrame with a DatetimeIndex or PeriodIndex at the quarterly
frequency. See the "Notes" section for details on how to set up a model
with monthly/quarterly mixed frequency data.
init_t0 : bool, optional
If True, this option initializes the Kalman filter with the
distribution for :math:`\alpha_0` rather than :math:`\alpha_1`. See
the "Notes" section for more details. This option is rarely used except
for testing. Default is False.
obs_cov_diag : bool, optional
If True and if `idiosyncratic_ar1 is True`, then this option puts small
positive values in the observation disturbance covariance matrix. This
is not required for estimation and is rarely used except for testing.
(It is sometimes used to prevent numerical errors, for example those
associated with a positive semi-definite forecast error covariance
matrix at the first time step when using EM initialization, but state
space models in Statsmodels switch to the univariate approach in those
cases, and so do not need to use this trick). Default is False.
Notes
-----
The basic model is:
.. math::
y_t & = \Lambda f_t + \epsilon_t \\
f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + u_t
where:
- :math:`y_t` is observed data at time t
- :math:`\epsilon_t` is idiosyncratic disturbance at time t (see below for
details, including modeling serial correlation in this term)
- :math:`f_t` is the unobserved factor at time t
- :math:`u_t \sim N(0, Q)` is the factor disturbance at time t
and:
- :math:`\Lambda` is referred to as the matrix of factor loadings
- :math:`A_i` are matrices of autoregression coefficients
Furthermore, we allow the idiosyncratic disturbances to be serially
correlated, so that, if `idiosyncratic_ar1=True`,
:math:`\epsilon_{i,t} = \rho_i \epsilon_{i,t-1} + e_{i,t}`, where
:math:`e_{i,t} \sim N(0, \sigma_i^2)`. If `idiosyncratic_ar1=False`,
then we instead have :math:`\epsilon_{i,t} = e_{i,t}`.
This basic setup can be found in [1]_, [2]_, [3]_, and [4]_.
We allow for two generalizations of this model:
1. Following [2]_, we allow multiple "blocks" of factors, which are
independent from the other blocks of factors. Different blocks can be
set to load on different subsets of the observed variables, and can be
specified with different lag orders.
2. Following [4]_ and [2]_, we allow mixed frequency models in which both
monthly and quarterly data are used. See the section on "Mixed frequency
models", below, for more details.
Additional notes:
- The observed data may contain arbitrary patterns of missing entries.
**EM algorithm**
This model contains a potentially very large number of parameters, and it
can be difficult and take a prohibitively long time to numerically optimize
the likelihood function using quasi-Newton methods. Instead, the default
fitting method in this model uses the EM algorithm, as detailed in [1]_.
As a result, the model can accommodate datasets with hundreds of
observed variables.
**Mixed frequency data**
This model can handle mixed frequency data in two ways. In this section,
we only briefly describe this, and refer readers to [2]_ and [4]_ for all
details.
First, because there can be arbitrary patterns of missing data in the
observed vector, one can simply include lower frequency variables as
observed in a particular higher frequency period, and missing otherwise.
For example, in a monthly model, one could include quarterly data as
occurring on the third month of each quarter. To use this method, one
simply needs to combine the data into a single dataset at the higher
frequency that can be passed to this model as the `endog` argument.
However, depending on the type of variables used in the analysis and the
assumptions about the data generating process, this approach may not be
valid.
For example, suppose that we are interested in the growth rate of real GDP,
which is measured at a quarterly frequency. If the basic factor model is
specified at a monthly frequency, then the quarterly growth rate in the
third month of each quarter -- which is what we actually observe -- is
approximated by a particular weighted average of unobserved monthly growth
rates. We need to take this particular weighted average into account
in constructing our model, and this is what the second approach does.
The second approach follows [2]_ and [4]_ in constructing a state space
form to explicitly model the quarterly growth rates in terms of the
unobserved monthly growth rates. To use this approach, there are two
methods:
1. Combine the monthly and quarterly data into a single dataset at the
monthly frequency, with the monthly data in the first columns and the
quarterly data in the last columns. Pass this dataset to the model as
the `endog` argument and give the number of the variables that are
monthly as the `k_endog_monthly` argument.
2. Construct a monthly dataset as a Pandas DataFrame with a DatetimeIndex
or PeriodIndex at the monthly frequency and separately construct a
quarterly dataset as a Pandas DataFrame with a DatetimeIndex or
PeriodIndex at the quarterly frequency. Pass the monthly DataFrame to
the model as the `endog` argument and pass the quarterly DataFrame to
the model as the `endog_quarterly` argument.
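As a brief sketch of these two methods (the names `combined`, `endog_M`, and
`endog_Q` below are placeholders for user-supplied datasets):
>>> # Method 1: one monthly-frequency dataset, monthly columns first
>>> mod = sm.tsa.DynamicFactorMQ(combined, k_endog_monthly=2)
>>> # Method 2: separate monthly and quarterly Pandas datasets
>>> mod = sm.tsa.DynamicFactorMQ(endog_M, endog_quarterly=endog_Q)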
Note that this only incorporates one particular type of mixed frequency
data. See also Bańbura et al. (2013), "Now-Casting and the Real-Time Data
Flow," for discussion about other types of mixed frequency data that are
not supported by this framework.
**Nowcasting and the news**
Through its support for monthly/quarterly mixed frequency data, this model
can allow for the nowcasting of quarterly variables based on monthly
observations. In particular, [2]_ and [3]_ use this model to construct
nowcasts of real GDP and analyze the impacts of "the news", derived from
incoming data on a real-time basis. This latter functionality can be
accessed through the `news` method of the results object.
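A minimal sketch of that workflow (hypothetical results objects; see the
documentation of the `news` method for the full set of arguments):
>>> # res_prev was fit on an earlier data vintage, res_new on updated data
>>> news = res_new.news(res_prev)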
**Standardizing data**
As is often the case in formulating a dynamic factor model, we do not
explicitly account for the mean of each observed variable. Instead, the
default behavior is to standardize each variable prior to estimation. Thus
if :math:`y_t` are the given observed data, the dynamic factor model is
actually estimated on the standardized data defined by:
.. math::
x_{i, t} = (y_{i, t} - \bar y_i) / s_i
where :math:`\bar y_i` is the sample mean and :math:`s_i` is the sample
standard deviation.
By default, if standardization is applied prior to estimation, results such
as in-sample predictions, out-of-sample forecasts, and the computation of
the "news" are reported in the scale of the original data (i.e. the model
output has the reverse transformation applied before it is returned to the
user).
Standardization can be disabled by passing `standardize=False` to the
model constructor.
**Identification of factors and loadings**
The estimated factors and the factor loadings in this model are only
identified up to an invertible transformation. As described in (the working
paper version of) [2]_, while it is possible to impose normalizations to
achieve identification, the EM algorithm will converge regardless.
Moreover, for nowcasting and forecasting purposes, identification is not
required. This model does not impose any normalization to identify the
factors and the factor loadings.
**Miscellaneous**
There are two arguments available in the model constructor that are rarely
used but which deserve a brief mention: `init_t0` and `obs_cov_diag`. These
arguments are provided to allow exactly matching the output of other
packages that have slight differences in how the underlying state space
model is set up / applied.
- `init_t0`: state space models in Statsmodels follow Durbin and Koopman in
initializing the model with :math:`\alpha_1 \sim N(a_1, P_1)`. Other
implementations sometimes initialize instead with
:math:`\alpha_0 \sim N(a_0, P_0)`. We can accommodate this by prepending
a row of NaNs to the observed dataset.
- `obs_cov_diag`: the state space form in [1]_ incorporates non-zero (but
very small) diagonal elements for the observation disturbance covariance
matrix.
Examples
--------
Constructing and fitting a `DynamicFactorMQ` model.
>>> data = sm.datasets.macrodata.load_pandas().data.iloc[-100:]
>>> data.index = pd.period_range(start='1984Q4', end='2009Q3', freq='Q')
>>> endog = data[['infl', 'tbilrate']].resample('M').last()
>>> endog_Q = np.log(data[['realgdp', 'realcons']]).diff().iloc[1:] * 400
**Basic usage**
In the simplest case, passing only the `endog` argument results in a model
with a single factor that follows an AR(1) process. Note that because we
are not also providing an `endog_quarterly` dataset, `endog` can be a numpy
array or Pandas DataFrame with any index (it does not have to be monthly).
The `summary` method can be useful in checking the model specification.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
**Factors**
With `factors=2`, there will be two independent factors that will each
evolve according to separate AR(1) processes.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 1
1 1
=====================
**Factor multiplicities**
By instead specifying `factor_multiplicities=2`, we would still have two
factors, but they would be dependent and would evolve jointly according
to a VAR(1) process.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factor_multiplicities=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 1 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0.1 0.2
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0.1, 0.2 1
=====================
**Factor orders**
In either of the above cases, we could extend the order of the (vector)
autoregressions by using the `factor_orders` argument. For example, the
below model would contain two independent factors that each evolve
according to a separate AR(2) process:
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2, factor_orders=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 2
1 2
=====================
**Serial correlation in the idiosyncratic disturbances**
By default, the model allows each idiosyncratic disturbance term to evolve
according to an AR(1) process. If preferred, they can instead be specified
to be serially independent by passing `idiosyncratic_ar1=False`.
>>> mod = sm.tsa.DynamicFactorMQ(endog, idiosyncratic_ar1=False)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ iid idiosyncratic Idiosyncratic disturbances: iid
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
*Monthly / Quarterly mixed frequency*
To specify a monthly / quarterly mixed frequency model, pass the quarterly
dataset via the `endog_quarterly` argument (see the Notes section for more
details about these models):
>>> mod = sm.tsa.DynamicFactorMQ(endog, endog_quarterly=endog_Q)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
realgdp X
realcons X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
*Customize observed variable / factor loadings*
To specify that certain observed variables only load on certain factors, it
is possible to pass a dictionary to the `factors` argument.
>>> factors = {'infl': ['global'],
... 'tbilrate': ['global'],
... 'realgdp': ['global', 'real'],
... 'realcons': ['global', 'real']}
>>> mod = sm.tsa.DynamicFactorMQ(endog, endog_quarterly=endog_Q,
... factors=factors)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factor blocks: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable global real
-----------------------------------
infl X
tbilrate X
realgdp X X
realcons X X
Factor blocks:
=====================
block order
---------------------
global 1
real 1
=====================
**Fitting parameters**
To fit the model, use the `fit` method. This method uses the EM algorithm
by default.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.summary())
Dynamic Factor Results
==========================================================================
Dep. Variable: ['infl', 'tbilrate'] No. Observations: 300
Model: Dynamic Factor Model Log Likelihood -127.909
+ 1 factors in 1 blocks AIC 271.817
+ AR(1) idiosyncratic BIC 301.447
Date: Tue, 04 Aug 2020 HQIC 283.675
Time: 15:59:11 EM Iterations 83
Sample: 10-31-1984
- 09-30-2009
Covariance Type: Not computed
Observation equation:
==============================================================
Factor loadings: 0 idiosyncratic: AR(1) var.
--------------------------------------------------------------
infl -0.67 0.39 0.73
tbilrate -0.63 0.99 0.01
Transition: Factor block 0
=======================================
L1.0 error variance
---------------------------------------
0 0.98 0.01
=======================================
Warnings:
[1] Covariance matrix not calculated.
*Displaying iteration progress*
To display information about the EM iterations, use the `disp` argument.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit(disp=10)
EM start iterations, llf=-291.21
EM iteration 10, llf=-157.17, convergence criterion=0.053801
EM iteration 20, llf=-128.99, convergence criterion=0.0035545
EM iteration 30, llf=-127.97, convergence criterion=0.00010224
EM iteration 40, llf=-127.93, convergence criterion=1.3281e-05
EM iteration 50, llf=-127.92, convergence criterion=5.4725e-06
EM iteration 60, llf=-127.91, convergence criterion=2.8665e-06
EM iteration 70, llf=-127.91, convergence criterion=1.6999e-06
EM iteration 80, llf=-127.91, convergence criterion=1.1085e-06
EM converged at iteration 83, llf=-127.91,
convergence criterion=9.9004e-07 < tolerance=1e-06
**Results: forecasting, impulse responses, and more**
Once the model is fitted, there are a number of methods available from the
results object. Some examples include:
*Forecasting*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.forecast(steps=5))
infl tbilrate
2009-10 1.784169 0.260401
2009-11 1.735848 0.305981
2009-12 1.730674 0.350968
2010-01 1.742110 0.395369
2010-02 1.759786 0.439194
*Impulse responses*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.impulse_responses(steps=5))
infl tbilrate
0 -1.511956 -1.341498
1 -1.483172 -1.315960
2 -1.454937 -1.290908
3 -1.427240 -1.266333
4 -1.400069 -1.242226
5 -1.373416 -1.218578
For other available methods (including in-sample prediction, simulation of
time series, extending the results to incorporate new data, and the news),
see the documentation for state space models.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bok, Brandyn, Daniele Caratelli, Domenico Giannone,
Argia M. Sbordone, and Andrea Tambalotti. 2018.
"Macroeconomic Nowcasting and Forecasting with Big Data."
Annual Review of Economics 10 (1): 615-43.
https://doi.org/10.1146/annurev-economics-080217-053214.
.. [4] Mariano, Roberto S., and Yasutomo Murasawa.
"A coincident index, common factors, and monthly real GDP."
Oxford Bulletin of Economics and Statistics 72, no. 1 (2010): 27-46.
"""
def __init__(self, endog, k_endog_monthly=None, factors=1, factor_orders=1,
factor_multiplicities=None, idiosyncratic_ar1=True,
standardize=True, endog_quarterly=None, init_t0=False,
obs_cov_diag=False, **kwargs):
# Handle endog variables
if endog_quarterly is not None:
if k_endog_monthly is not None:
raise ValueError('If `endog_quarterly` is specified, then'
' `endog` must contain only monthly'
' variables, and so `k_endog_monthly` cannot'
' be specified since it will be inferred from'
' the shape of `endog`.')
endog, k_endog_monthly = self.construct_endog(
endog, endog_quarterly)
endog_is_pandas = _is_using_pandas(endog, None)
if endog_is_pandas:
if isinstance(endog, pd.Series):
endog = endog.to_frame()
else:
if np.ndim(endog) < 2:
endog = np.atleast_2d(endog).T
if k_endog_monthly is None:
k_endog_monthly = endog.shape[1]
if endog_is_pandas:
endog_names = endog.columns.tolist()
else:
if endog.shape[1] == 1:
endog_names = ['y']
else:
endog_names = [f'y{i + 1}' for i in range(endog.shape[1])]
self.k_endog_M = int_like(k_endog_monthly, 'k_endog_monthly')
self.k_endog_Q = endog.shape[1] - self.k_endog_M
# Compute helper for handling factors / state indexing
s = self._s = DynamicFactorMQStates(
self.k_endog_M, self.k_endog_Q, endog_names, factors,
factor_orders, factor_multiplicities, idiosyncratic_ar1)
# Save parameterization
self.factors = factors
self.factor_orders = factor_orders
self.factor_multiplicities = factor_multiplicities
self.endog_factor_map = self._s.endog_factor_map
self.factor_block_orders = self._s.factor_block_orders
self.factor_names = self._s.factor_names
self.k_factors = self._s.k_factors
self.k_factor_blocks = len(self.factor_block_orders)
self.max_factor_order = self._s.max_factor_order
self.idiosyncratic_ar1 = idiosyncratic_ar1
self.init_t0 = init_t0
self.obs_cov_diag = obs_cov_diag
if self.init_t0:
# TODO: test each of these options
if endog_is_pandas:
ix = pd.period_range(endog.index[0] - 1, endog.index[-1],
freq='M')
endog = endog.reindex(ix)
else:
endog = np.c_[[np.nan] * endog.shape[1], endog.T].T
# Standardize endog, if requested
# Note: endog_mean and endog_std will always each be 1-dimensional with
# length equal to the number of endog variables
if isinstance(standardize, tuple) and len(standardize) == 2:
endog_mean, endog_std = standardize
# Validate the input
n = endog.shape[1]
if (isinstance(endog_mean, pd.Series) and not
endog_mean.index.equals(pd.Index(endog_names))):
raise ValueError('Invalid value passed for `standardize`:'
' if a Pandas Series, must have index'
f' {endog_names}. Got {endog_mean.index}.')
else:
endog_mean = np.atleast_1d(endog_mean)
if (isinstance(endog_std, pd.Series) and not
endog_std.index.equals(pd.Index(endog_names))):
raise ValueError('Invalid value passed for `standardize`:'
' if a Pandas Series, must have index'
f' {endog_names}. Got {endog_std.index}.')
else:
endog_std = np.atleast_1d(endog_std)
if (np.shape(endog_mean) != (n,) or np.shape(endog_std) != (n,)):
raise ValueError('Invalid value passed for `standardize`: each'
f' element must be shaped ({n},).')
standardize = True
# Make sure we have Pandas if endog is Pandas
if endog_is_pandas:
endog_mean = pd.Series(endog_mean, index=endog_names)
endog_std = pd.Series(endog_std, index=endog_names)
elif standardize in [1, True]:
endog_mean = endog.mean(axis=0)
endog_std = endog.std(axis=0)
elif standardize in [0, False]:
endog_mean = np.zeros(endog.shape[1])
endog_std = np.ones(endog.shape[1])
else:
raise ValueError('Invalid value passed for `standardize`.')
self._endog_mean = endog_mean
self._endog_std = endog_std
self.standardize = standardize
if np.any(self._endog_std < 1e-10):
ix = np.where(self._endog_std < 1e-10)
names = np.array(endog_names)[ix[0]].tolist()
raise ValueError('Constant variable(s) found in observed'
' variables, but constants cannot be included'
f' in this model. These variables are: {names}.')
if self.standardize:
endog = (endog - self._endog_mean) / self._endog_std
# Observation / states slices
o = self._o = {
'M': np.s_[:self.k_endog_M],
'Q': np.s_[self.k_endog_M:]}
# Construct the basic state space representation
super().__init__(endog, k_states=s.k_states, k_posdef=s.k_posdef,
**kwargs)
# Revert the standardization for orig_endog
if self.standardize:
self.data.orig_endog = (
self.data.orig_endog * self._endog_std + self._endog_mean)
# State initialization
# Note: we could just initialize the entire thing as stationary, but
# doing each block separately should be faster and avoid numerical
# issues
if 'initialization' not in kwargs:
self.ssm.initialize(self._default_initialization())
# Fixed components of the state space representation
# > design
if self.idiosyncratic_ar1:
self['design', o['M'], s['idio_ar_M']] = np.eye(self.k_endog_M)
multipliers = [1, 2, 3, 2, 1]
for i in range(len(multipliers)):
m = multipliers[i]
self['design', o['Q'], s['idio_ar_Q_ix'][:, i]] = (
m * np.eye(self.k_endog_Q))
# > obs cov
if self.obs_cov_diag:
self['obs_cov'] = np.eye(self.k_endog) * 1e-4
# > transition
for block in s.factor_blocks:
if block.k_factors == 1:
tmp = 0
else:
tmp = np.zeros((block.k_factors, block.k_factors))
self['transition', block['factors'], block['factors']] = (
companion_matrix([1] + [tmp] * block._factor_order).T)
if self.k_endog_Q == 1:
tmp = 0
else:
tmp = np.zeros((self.k_endog_Q, self.k_endog_Q))
self['transition', s['idio_ar_Q'], s['idio_ar_Q']] = (
companion_matrix([1] + [tmp] * 5).T)
# > selection
ix1 = ix2 = 0
for block in s.factor_blocks:
ix2 += block.k_factors
self['selection', block['factors_ix'][:, 0], ix1:ix2] = (
np.eye(block.k_factors))
ix1 = ix2
if self.idiosyncratic_ar1:
ix2 = ix1 + self.k_endog_M
self['selection', s['idio_ar_M'], ix1:ix2] = np.eye(self.k_endog_M)
ix1 = ix2
ix2 = ix1 + self.k_endog_Q
self['selection', s['idio_ar_Q_ix'][:, 0], ix1:ix2] = (
np.eye(self.k_endog_Q))
# Parameters
self.params = OrderedDict([
('loadings', np.sum(self.endog_factor_map.values)),
('factor_ar', np.sum([block.k_factors**2 * block.factor_order
for block in s.factor_blocks])),
('factor_cov', np.sum([block.k_factors * (block.k_factors + 1) // 2
for block in s.factor_blocks])),
('idiosyncratic_ar1',
self.k_endog if self.idiosyncratic_ar1 else 0),
('idiosyncratic_var', self.k_endog)])
self.k_params = np.sum(list(self.params.values()))
# Parameter slices
ix = np.split(np.arange(self.k_params),
np.cumsum(list(self.params.values()))[:-1])
self._p = dict(zip(self.params.keys(), ix))
# Cache
self._loading_constraints = {}
# Initialization kwarg keys, e.g. for cloning
self._init_keys += [
'factors', 'factor_orders', 'factor_multiplicities',
'idiosyncratic_ar1', 'standardize', 'init_t0',
'obs_cov_diag'] + list(kwargs.keys())
@classmethod
def construct_endog(cls, endog_monthly, endog_quarterly):
"""
Construct a combined dataset from separate monthly and quarterly data.
Parameters
----------
endog_monthly : array_like
Monthly dataset. If a quarterly dataset is given, then this must
be a Pandas object with a PeriodIndex or DatetimeIndex at a monthly
frequency.
endog_quarterly : array_like or None
Quarterly dataset. If not None, then this must be a Pandas object
with a PeriodIndex or DatetimeIndex at a quarterly frequency.
Returns
-------
endog : array_like
If both endog_monthly and endog_quarterly were given, this is a
Pandas DataFrame with a PeriodIndex at the monthly frequency, with
all of the columns from `endog_monthly` ordered first and the
columns from `endog_quarterly` ordered afterwards. Otherwise it is
simply the input `endog_monthly` dataset.
k_endog_monthly : int
The number of monthly variables (which are ordered first) in the
returned `endog` dataset.
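A minimal usage sketch (`endog_M` and `endog_Q` are placeholders for monthly
and quarterly Pandas datasets with appropriate date indexes):
>>> endog, k_endog_monthly = DynamicFactorMQ.construct_endog(endog_M, endog_Q)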
"""
# Create combined dataset
if endog_quarterly is not None:
# Validate endog_monthly
base_msg = ('If given both monthly and quarterly data'
' then the monthly dataset must be a Pandas'
' object with a date index at a monthly frequency.')
if not isinstance(endog_monthly, (pd.Series, pd.DataFrame)):
raise ValueError('Given monthly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_monthly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given monthly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_monthly.index, 'freqstr', 'N')[0] == 'M':
freqstr = getattr(endog_monthly.index, 'freqstr', 'None')
raise ValueError('Index of given monthly dataset has a'
' non-monthly frequency (to check this,'
' examine the `freqstr` attribute of the'
' index of the dataset - it should start with'
' M if it is monthly).'
f' Got {freqstr}. ' + base_msg)
# Validate endog_quarterly
base_msg = ('If a quarterly dataset is given, then it must be a'
' Pandas object with a date index at a quarterly'
' frequency.')
if not isinstance(endog_quarterly, (pd.Series, pd.DataFrame)):
raise ValueError('Given quarterly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_quarterly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given quarterly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_quarterly.index, 'freqstr', 'N')[0] == 'Q':
freqstr = getattr(endog_quarterly.index, 'freqstr', 'None')
raise ValueError('Index of given quarterly dataset'
' has a non-quarterly frequency (to check'
' this, examine the `freqstr` attribute of'
' the index of the dataset - it should start'
' with Q if it is quarterly).'
f' Got {freqstr}. ' + base_msg)
# Convert to PeriodIndex, if applicable
if hasattr(endog_monthly.index, 'to_period'):
endog_monthly = endog_monthly.to_period('M')
if hasattr(endog_quarterly.index, 'to_period'):
endog_quarterly = endog_quarterly.to_period('Q')
# Combine the datasets
endog = pd.concat([
endog_monthly,
endog_quarterly.resample('M', convention='end').first()],
axis=1)
# Make sure we didn't accidentally get duplicate column names
column_counts = endog.columns.value_counts()
if column_counts.max() > 1:
columns = endog.columns.values.astype(object)
for name in column_counts.index:
count = column_counts.loc[name]
if count == 1:
continue
mask = columns == name
columns[mask] = [f'{name}{i + 1}' for i in range(count)]
endog.columns = columns
else:
endog = endog_monthly.copy()
shape = endog_monthly.shape
k_endog_monthly = shape[1] if len(shape) == 2 else 1
return endog, k_endog_monthly
def clone(self, endog, k_endog_monthly=None, endog_quarterly=None,
retain_standardization=False, **kwargs):
"""
Clone state space model with new data and optionally new specification.
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
Observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
kwargs
Keyword arguments to pass to the new model class to change the
model specification.
Returns
-------
model : DynamicFactorMQ instance
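        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        and `new_endog` is a dataset containing the same variables (both
        names are hypothetical):
        >>> mod_new = mod.clone(new_endog, retain_standardization=True)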
"""
if retain_standardization and self.standardize:
kwargs['standardize'] = (self._endog_mean, self._endog_std)
mod = self._clone_from_init_kwds(
endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly, **kwargs)
return mod
@property
def _res_classes(self):
return {'fit': (DynamicFactorMQResults, mlemodel.MLEResultsWrapper)}
def _default_initialization(self):
s = self._s
init = initialization.Initialization(self.k_states)
for block in s.factor_blocks:
init.set(block['factors'], 'stationary')
if self.idiosyncratic_ar1:
for i in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init.set(i, 'stationary')
init.set(s['idio_ar_Q'], 'stationary')
return init
def _get_endog_names(self, truncate=None, as_string=None):
if truncate is None:
truncate = False if as_string is False or self.k_endog == 1 else 24
if as_string is False and truncate is not False:
raise ValueError('Can only truncate endog names if they'
' are returned as a string.')
if as_string is None:
as_string = truncate is not False
# The base `endog_names` property is only a list if there are at least
# two variables; often, we need it to be a list
endog_names = self.endog_names
if not isinstance(endog_names, list):
endog_names = [endog_names]
if as_string:
endog_names = [str(name) for name in endog_names]
if truncate is not False:
n = truncate
endog_names = [name if len(name) <= n else name[:n] + '...'
for name in endog_names]
return endog_names
@property
def _model_name(self):
model_name = [
'Dynamic Factor Model',
f'{self.k_factors} factors in {self.k_factor_blocks} blocks']
if self.k_endog_Q > 0:
model_name.append('Mixed frequency (M/Q)')
error_type = 'AR(1)' if self.idiosyncratic_ar1 else 'iid'
model_name.append(f'{error_type} idiosyncratic')
return model_name
def summary(self, truncate_endog_names=None):
"""
Create a summary table describing the model.
Parameters
----------
truncate_endog_names : int, optional
The number of characters to show for names of observed variables.
Default is 24 if there is more than one observed variable, or
            an unlimited number if there is only one.
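        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        (a hypothetical name):
        >>> tables = mod.summary(truncate_endog_names=10)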
"""
# Get endog names
endog_names = self._get_endog_names(truncate=truncate_endog_names,
as_string=True)
title = 'Model Specification: Dynamic Factor Model'
if self._index_dates:
ix = self._index
d = ix[0]
sample = ['%s' % d]
d = ix[-1]
sample += ['- ' + '%s' % d]
else:
sample = [str(0), ' - ' + str(self.nobs)]
# Standardize the model name as a list of str
model_name = self._model_name
# - Top summary table ------------------------------------------------
top_left = []
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Sample:', [sample[0]]),
('', [sample[1]])]
top_right = []
if self.k_endog_Q > 0:
top_right += [
('# of monthly variables:', [self.k_endog_M]),
('# of quarterly variables:', [self.k_endog_Q])]
else:
top_right += [('# of observed variables:', [self.k_endog])]
if self.k_factor_blocks == 1:
top_right += [('# of factors:', [self.k_factors])]
else:
top_right += [('# of factor blocks:', [self.k_factor_blocks])]
top_right += [('Idiosyncratic disturbances:',
['AR(1)' if self.idiosyncratic_ar1 else 'iid']),
('Standardize variables:', [self.standardize])]
summary = Summary()
self.model = self
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
table_ix = 1
del self.model
# - Endog / factor map -----------------------------------------------
data = self.endog_factor_map.replace({True: 'X', False: ''})
data.index = endog_names
        for name, col in data.items():
data[name] = data[name] + (' ' * (len(name) // 2))
data.index.name = 'Dep. variable'
data = data.reset_index()
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Observed variables / factor loadings'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
# - Factor blocks summary table --------------------------------------
data = self.factor_block_orders.reset_index()
data['block'] = data['block'].map(
lambda factor_names: ', '.join(factor_names))
        data['order'] = data['order'].astype(str)
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Factor blocks:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
return summary
def __str__(self):
"""Summary tables showing model specification."""
return str(self.summary())
@property
def state_names(self):
"""(list of str) List of human readable names for unobserved states."""
# Factors
state_names = []
for block in self._s.factor_blocks:
state_names += [f'{name}' for name in block.factor_names[:]]
for s in range(1, block._factor_order):
state_names += [f'L{s}.{name}'
for name in block.factor_names]
# Monthly error
endog_names = self._get_endog_names()
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
state_names += [f'eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
# Quarterly error
state_names += [f'eps_Q.{name}' for name in endog_names_Q]
for s in range(1, 5):
state_names += [f'L{s}.eps_Q.{name}' for name in endog_names_Q]
return state_names
@property
def param_names(self):
"""(list of str) List of human readable parameter names."""
param_names = []
# Loadings
# So that Lambda = params[ix].reshape(self.k_endog, self.k_factors)
# (where Lambda stacks Lambda_M and Lambda_Q)
endog_names = self._get_endog_names(as_string=False)
for endog_name in endog_names:
for block in self._s.factor_blocks:
for factor_name in block.factor_names:
if self.endog_factor_map.loc[endog_name, factor_name]:
param_names.append(
f'loading.{factor_name}->{endog_name}')
# Factor VAR
for block in self._s.factor_blocks:
for to_factor in block.factor_names:
param_names += [f'L{i}.{from_factor}->{to_factor}'
for i in range(1, block.factor_order + 1)
for from_factor in block.factor_names]
# Factor covariance
for i in range(len(self._s.factor_blocks)):
block = self._s.factor_blocks[i]
param_names += [f'fb({i}).cov.chol[{j + 1},{k + 1}]'
for j in range(block.k_factors)
for k in range(j + 1)]
# Error AR(1)
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
param_names += [f'L1.eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
param_names += [f'L1.eps_Q.{name}' for name in endog_names_Q]
# Error innovation variances
param_names += [f'sigma2.{name}' for name in endog_names]
return param_names
@property
def start_params(self):
"""(array) Starting parameters for maximum likelihood estimation."""
params = np.zeros(self.k_params, dtype=np.float64)
# (1) estimate factors one at a time, where the first step uses
# PCA on all `endog` variables that load on the first factor, and
# subsequent steps use residuals from the previous steps.
# TODO: what about factors that only load on quarterly variables?
endog_factor_map_M = self.endog_factor_map.iloc[:self.k_endog_M]
factors = []
        endog = (pd.DataFrame(self.endog).interpolate()
                   .bfill()
                   .values)
for name in self.factor_names:
# Try to retrieve this from monthly variables, which is most
# consistent
endog_ix = np.where(endog_factor_map_M.loc[:, name])[0]
# But fall back to quarterly if necessary
if len(endog_ix) == 0:
endog_ix = np.where(self.endog_factor_map.loc[:, name])[0]
factor_endog = endog[:, endog_ix]
res_pca = PCA(factor_endog, ncomp=1, method='eig', normalize=False)
factors.append(res_pca.factors)
endog[:, endog_ix] -= res_pca.projection
factors = np.concatenate(factors, axis=1)
# (2) Estimate coefficients for each endog, one at a time (OLS for
# monthly variables, restricted OLS for quarterly). Also, compute
# residuals.
loadings = []
resid = []
for i in range(self.k_endog_M):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = factors[:, factor_ix]
mod_ols = OLS(self.endog[:, i], exog=factor_exog, missing='drop')
res_ols = mod_ols.fit()
loadings += res_ols.params.tolist()
resid.append(res_ols.resid)
for i in range(self.k_endog_M, self.k_endog):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = lagmat(factors[:, factor_ix], 4, original='in')
mod_glm = GLM(self.endog[:, i], factor_exog, missing='drop')
res_glm = mod_glm.fit_constrained(self.loading_constraints(i))
loadings += res_glm.params[:len(factor_ix)].tolist()
resid.append(res_glm.resid_response)
params[self._p['loadings']] = loadings
# (3) For each factor block, use an AR or VAR model to get coefficients
# and covariance estimate
# Factor transitions
stationary = True
factor_ar = []
factor_cov = []
i = 0
for block in self._s.factor_blocks:
factors_endog = factors[:, i:i + block.k_factors]
i += block.k_factors
if block.factor_order == 0:
continue
if block.k_factors == 1:
mod_factors = SARIMAX(factors_endog,
order=(block.factor_order, 0, 0))
sp = mod_factors.start_params
block_factor_ar = sp[:-1]
block_factor_cov = sp[-1:]
coefficient_matrices = mod_factors.start_params[:-1]
elif block.k_factors > 1:
mod_factors = VAR(factors_endog)
res_factors = mod_factors.fit(
maxlags=block.factor_order, ic=None, trend='nc')
block_factor_ar = res_factors.params.T.ravel()
L = np.linalg.cholesky(res_factors.sigma_u)
block_factor_cov = L[np.tril_indices_from(L)]
coefficient_matrices = np.transpose(
np.reshape(block_factor_ar,
(block.k_factors, block.k_factors,
block.factor_order)), (2, 0, 1))
# Test for stationarity
stationary = is_invertible([1] + list(-coefficient_matrices))
# Check for stationarity
if not stationary:
warn('Non-stationary starting factor autoregressive'
' parameters found for factor block'
f' {block.factor_names}. Using zeros as starting'
' parameters.')
block_factor_ar[:] = 0
cov_factor = np.diag(factors_endog.std(axis=0))
block_factor_cov = (
cov_factor[np.tril_indices(block.k_factors)])
factor_ar += block_factor_ar.tolist()
factor_cov += block_factor_cov.tolist()
params[self._p['factor_ar']] = factor_ar
params[self._p['factor_cov']] = factor_cov
# (4) Use residuals from step (2) to estimate the idiosyncratic
# component
# Idiosyncratic component
if self.idiosyncratic_ar1:
idio_ar1 = []
idio_var = []
for i in range(self.k_endog_M):
mod_idio = SARIMAX(resid[i], order=(1, 0, 0), trend='c')
sp = mod_idio.start_params
idio_ar1.append(np.clip(sp[1], -0.99, 0.99))
idio_var.append(np.clip(sp[-1], 1e-5, np.inf))
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(maxiter=10, return_params=True,
disp=False)
res_idio = mod_idio.fit_em(res_idio, maxiter=5,
return_params=True)
idio_ar1.append(np.clip(res_idio[0], -0.99, 0.99))
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_ar1']] = idio_ar1
params[self._p['idiosyncratic_var']] = idio_var
else:
idio_var = [np.var(resid[i]) for i in range(self.k_endog_M)]
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(return_params=True, disp=False)
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_var']] = idio_var
return params
def transform_params(self, unconstrained):
"""
Transform parameters from optimizer space to model space.
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
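        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        (a hypothetical name). The starting parameters live in the
        constrained (model) space, so they can be mapped to the optimizer
        space and back:
        >>> unconstrained = mod.untransform_params(mod.start_params)
        >>> constrained = mod.transform_params(unconstrained)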
"""
constrained = unconstrained.copy()
# Stationary factor VAR
unconstrained_factor_ar = unconstrained[self._p['factor_ar']]
constrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
unconstrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = constrain_stationary_multivariate(tmp_coeff,
tmp_cov)
constrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
constrained[self._p['factor_ar']] = constrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = unconstrained[self._p['idiosyncratic_ar1']]
constrained[self._p['idiosyncratic_ar1']] = [
constrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
constrained[self._p['idiosyncratic_var']] = (
constrained[self._p['idiosyncratic_var']]**2)
return constrained
def untransform_params(self, constrained):
"""
Transform parameters from model space to optimizer space.
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer.
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
unconstrained = constrained.copy()
# Stationary factor VAR
constrained_factor_ar = constrained[self._p['factor_ar']]
unconstrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
constrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = unconstrain_stationary_multivariate(tmp_coeff,
tmp_cov)
unconstrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
unconstrained[self._p['factor_ar']] = unconstrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = constrained[self._p['idiosyncratic_ar1']]
unconstrained[self._p['idiosyncratic_ar1']] = [
unconstrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
unconstrained[self._p['idiosyncratic_var']] = (
unconstrained[self._p['idiosyncratic_var']]**0.5)
return unconstrained
def update(self, params, **kwargs):
"""
Update the parameters of the model.
Parameters
----------
params : array_like
Array of new parameters.
transformed : bool, optional
Whether or not `params` is already transformed. If set to False,
`transform_params` is called. Default is True.
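        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        (a hypothetical name); this writes the parameter values into the
        state space system matrices in place:
        >>> mod.update(mod.start_params)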
"""
params = super().update(params, **kwargs)
# Local copies
o = self._o
s = self._s
p = self._p
# Loadings
loadings = params[p['loadings']]
start = 0
for i in range(self.k_endog_M):
iloc = self._s.endog_factor_iloc[i]
k_factors = len(iloc)
factor_ix = s['factors_L1'][iloc]
self['design', i, factor_ix] = loadings[start:start + k_factors]
start += k_factors
multipliers = np.array([1, 2, 3, 2, 1])[:, None]
for i in range(self.k_endog_M, self.k_endog):
iloc = self._s.endog_factor_iloc[i]
k_factors = len(iloc)
factor_ix = s['factors_L1_5_ix'][:, iloc]
self['design', i, factor_ix.ravel()] = np.ravel(
loadings[start:start + k_factors] * multipliers)
start += k_factors
# Factor VAR
factor_ar = params[p['factor_ar']]
start = 0
for block in s.factor_blocks:
k_params = block.k_factors**2 * block.factor_order
A = np.reshape(
factor_ar[start:start + k_params],
(block.k_factors, block.k_factors * block.factor_order))
start += k_params
self['transition', block['factors_L1'], block['factors_ar']] = A
# Factor covariance
factor_cov = params[p['factor_cov']]
start = 0
ix1 = 0
for block in s.factor_blocks:
k_params = block.k_factors * (block.k_factors + 1) // 2
L = np.zeros((block.k_factors, block.k_factors),
dtype=params.dtype)
L[np.tril_indices_from(L)] = factor_cov[start:start + k_params]
start += k_params
Q = L @ L.T
ix2 = ix1 + block.k_factors
self['state_cov', ix1:ix2, ix1:ix2] = Q
ix1 = ix2
# Error AR(1)
if self.idiosyncratic_ar1:
alpha = np.diag(params[p['idiosyncratic_ar1']])
self['transition', s['idio_ar_L1'], s['idio_ar_L1']] = alpha
# Error variances
if self.idiosyncratic_ar1:
self['state_cov', self.k_factors:, self.k_factors:] = (
np.diag(params[p['idiosyncratic_var']]))
else:
idio_var = params[p['idiosyncratic_var']]
self['obs_cov', o['M'], o['M']] = np.diag(idio_var[o['M']])
self['state_cov', self.k_factors:, self.k_factors:] = (
np.diag(idio_var[o['Q']]))
@property
def loglike_constant(self):
"""
Constant term in the joint log-likelihood function.
Useful in facilitating comparisons to other packages that exclude the
constant from the log-likelihood computation.
"""
return -0.5 * (1 - np.isnan(self.endog)).sum() * np.log(2 * np.pi)
def loading_constraints(self, i):
r"""
Matrix formulation of quarterly variables' factor loading constraints.
Parameters
----------
i : int
Index of the `endog` variable to compute constraints for.
Returns
-------
R : array (k_constraints, k_factors * 5)
q : array (k_constraints,)
Notes
-----
If the factors were known, then the factor loadings for the ith
quarterly variable would be computed by a linear regression of the form
y_i = A_i' f + B_i' L1.f + C_i' L2.f + D_i' L3.f + E_i' L4.f
where:
- f is (k_i x 1) and collects all of the factors that load on y_i
- L{j}.f is (k_i x 1) and collects the jth lag of each factor
- A_i, ..., E_i are (k_i x 1) and collect factor loadings
As the observed variable is quarterly while the factors are monthly, we
want to restrict the estimated regression coefficients to be:
y_i = A_i f + 2 A_i L1.f + 3 A_i L2.f + 2 A_i L3.f + A_i L4.f
Stack the unconstrained coefficients: \Lambda_i = [A_i' B_i' ... E_i']'
Then the constraints can be written as follows, for l = 1, ..., k_i
- 2 A_{i,l} - B_{i,l} = 0
- 3 A_{i,l} - C_{i,l} = 0
- 2 A_{i,l} - D_{i,l} = 0
- A_{i,l} - E_{i,l} = 0
So that k_constraints = 4 * k_i. In matrix form the constraints are:
.. math::
R \Lambda_i = q
where :math:`\Lambda_i` is shaped `(k_i * 5,)`, :math:`R` is shaped
`(k_constraints, k_i * 5)`, and :math:`q` is shaped `(k_constraints,)`.
For example, for the case that k_i = 2, we can write:
| 2 0 -1 0 0 0 0 0 0 0 | | A_{i,1} | | 0 |
| 0 2 0 -1 0 0 0 0 0 0 | | A_{i,2} | | 0 |
| 3 0 0 0 -1 0 0 0 0 0 | | B_{i,1} | | 0 |
| 0 3 0 0 0 -1 0 0 0 0 | | B_{i,2} | | 0 |
| 2 0 0 0 0 0 -1 0 0 0 | | C_{i,1} | = | 0 |
| 0 2 0 0 0 0 0 -1 0 0 | | C_{i,2} | | 0 |
| 1 0 0 0 0 0 0 0 -1 0 | | D_{i,1} | | 0 |
| 0 1 0 0 0 0 0 0 0 -1 | | D_{i,2} | | 0 |
| E_{i,1} | | 0 |
| E_{i,2} | | 0 |
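        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        with at least one quarterly variable (quarterly variables are ordered
        after the `k_endog_M` monthly variables):
        >>> i = mod.k_endog_M
        >>> R, q = mod.loading_constraints(i)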
"""
if i < self.k_endog_M:
raise ValueError('No constraints for monthly variables.')
if i not in self._loading_constraints:
k_factors = self.endog_factor_map.iloc[i].sum()
R = np.zeros((k_factors * 4, k_factors * 5))
q = np.zeros(R.shape[0])
# Let R = [R_1 R_2]
# Then R_1 is multiples of the identity matrix
multipliers = np.array([1, 2, 3, 2, 1])
R[:, :k_factors] = np.reshape(
(multipliers[1:] * np.eye(k_factors)[..., None]).T,
(k_factors * 4, k_factors))
# And R_2 is the identity
R[:, k_factors:] = np.diag([-1] * (k_factors * 4))
self._loading_constraints[i] = (R, q)
return self._loading_constraints[i]
def fit(self, start_params=None, transformed=True, includes_fixed=False,
cov_type='none', cov_kwds=None, method='em', maxiter=500,
tolerance=1e-6, em_initialization=True, mstep_method=None,
full_output=1, disp=False, callback=None, return_params=False,
optim_score=None, optim_complex_step=None, optim_hessian=None,
flags=None, low_memory=False, llf_decrease_action='revert',
llf_decrease_tolerance=1e-4, **kwargs):
"""
Fits the model by maximum likelihood via Kalman filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'none', since computing this matrix can be very slow
when there are a large number of parameters.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'em' for the EM algorithm
- 'newton' for Newton-Raphson
- 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
optim_score : {'harvey', 'approx'} or None, optional
The method by which the score vector is calculated. 'harvey' uses
the method from Harvey (1989), 'approx' uses either finite
difference or complex step differentiation depending upon the
value of `optim_complex_step`, and None uses the built-in gradient
approximation of the optimizer. Default is None. This keyword is
only relevant if the optimization method uses the score.
optim_complex_step : bool, optional
Whether or not to use complex step differentiation when
approximating the score; if False, finite difference approximation
is used. Default is True. This keyword is only relevant if
`optim_score` is set to 'harvey' or 'approx'.
optim_hessian : {'opg','oim','approx'}, optional
The method by which the Hessian is numerically approximated. 'opg'
uses outer product of gradients, 'oim' uses the information
matrix formula from Harvey (1989), and 'approx' uses numerical
approximation. This keyword is only relevant if the
optimization method uses the Hessian matrix.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Note that this option is not available when using the EM algorithm
(which is the default for this model). Default is False.
llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
Action to take if the log-likelihood decreases in an EM iteration.
'ignore' continues the iterations, 'warn' issues a warning but
continues the iterations, while 'revert' ends the iterations and
            returns the result from the last good iteration. Default is 'revert'.
llf_decrease_tolerance : float, optional
Minimum size of the log-likelihood decrease required to trigger a
warning or to end the EM iterations. Setting this value slightly
larger than zero allows small decreases in the log-likelihood that
may be caused by numerical issues. If set to zero, then any
decrease will trigger the `llf_decrease_action`. Default is 1e-4.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MLEResults
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
statsmodels.tsa.statespace.mlemodel.MLEResults
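        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        (a hypothetical name); the default `method='em'` dispatches to
        `fit_em`:
        >>> res = mod.fit(disp=10)
        >>> params = mod.fit(return_params=True)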
"""
if method == 'em':
return self.fit_em(
start_params=start_params, transformed=transformed,
cov_type=cov_type, cov_kwds=cov_kwds, maxiter=maxiter,
tolerance=tolerance, em_initialization=em_initialization,
mstep_method=mstep_method, full_output=full_output, disp=disp,
return_params=return_params, low_memory=low_memory,
llf_decrease_action=llf_decrease_action,
llf_decrease_tolerance=llf_decrease_tolerance, **kwargs)
else:
return super().fit(
start_params=start_params, transformed=transformed,
includes_fixed=includes_fixed, cov_type=cov_type,
cov_kwds=cov_kwds, method=method, maxiter=maxiter,
tolerance=tolerance, full_output=full_output, disp=disp,
callback=callback, return_params=return_params,
optim_score=optim_score,
optim_complex_step=optim_complex_step,
optim_hessian=optim_hessian, flags=flags,
low_memory=low_memory, **kwargs)
def fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=500, tolerance=1e-6, disp=False,
em_initialization=True, mstep_method=None, full_output=True,
return_params=False, low_memory=False,
llf_decrease_action='revert', llf_decrease_tolerance=1e-4):
"""
Fits the model by maximum likelihood via the EM algorithm.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is to use `DynamicFactorMQ.start_params`.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'none', since computing this matrix can be very slow
when there are a large number of parameters.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
maxiter : int, optional
The maximum number of EM iterations to perform.
tolerance : float, optional
Parameter governing convergence of the EM algorithm. The
`tolerance` is the minimum relative increase in the likelihood
for which convergence will be declared. A smaller value for the
`tolerance` will typically yield more precise parameter estimates,
but will typically require more EM iterations. Default is 1e-6.
disp : int or bool, optional
Controls printing of EM iteration progress. If an integer, progress
is printed at every `disp` iterations. A value of True is
interpreted as the value of 1. Default is False (nothing will be
printed).
em_initialization : bool, optional
Whether or not to also update the Kalman filter initialization
using the EM algorithm. Default is True.
mstep_method : {None, 'missing', 'nonmissing'}, optional
The EM algorithm maximization step. If there are no NaN values
in the dataset, this can be set to "nonmissing" (which is slightly
faster) or "missing", otherwise it must be "missing". Default is
"nonmissing" if there are no NaN values or "missing" if there are.
full_output : bool, optional
Set to True to have all available output from EM iterations in
the Results object's mle_retvals attribute.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
This option cannot be used with the EM algorithm and will raise an
error if set to True. Default is False.
llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
Action to take if the log-likelihood decreases in an EM iteration.
'ignore' continues the iterations, 'warn' issues a warning but
continues the iterations, while 'revert' ends the iterations and
            returns the result from the last good iteration. Default is 'revert'.
llf_decrease_tolerance : float, optional
Minimum size of the log-likelihood decrease required to trigger a
warning or to end the EM iterations. Setting this value slightly
larger than zero allows small decreases in the log-likelihood that
may be caused by numerical issues. If set to zero, then any
decrease will trigger the `llf_decrease_action`. Default is 1e-4.
Returns
-------
DynamicFactorMQResults
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEModel.fit
statsmodels.tsa.statespace.mlemodel.MLEResults
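        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        (a hypothetical name); a looser tolerance or smaller `maxiter` trades
        precision for speed:
        >>> res = mod.fit_em(maxiter=100, tolerance=1e-5, disp=10)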
"""
if self._has_fixed_params:
raise NotImplementedError('Cannot fit using the EM algorithm while'
' holding some parameters fixed.')
if low_memory:
raise ValueError('Cannot fit using the EM algorithm when using'
' low_memory option.')
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
llf_decrease_action = string_like(
llf_decrease_action, 'llf_decrease_action',
options=['ignore', 'warn', 'revert'])
disp = int(disp)
# Perform expectation-maximization
s = self._s
llf = []
params = [start_params]
init = None
inits = [self.ssm.initialization]
i = 0
delta = 0
terminate = False
# init_stationary = None if em_initialization else True
while i < maxiter and not terminate and (i < 1 or (delta > tolerance)):
out = self._em_iteration(params[-1], init=init,
mstep_method=mstep_method)
new_llf = out[0].llf_obs.sum()
# If we are not using EM initialization, then we need to check for
# non-stationary parameters
if not em_initialization:
self.update(out[1])
switch_init = []
T = self['transition']
init = self.ssm.initialization
iloc = np.arange(self.k_states)
# We may only have global initialization if we have no
# quarterly variables and idiosyncratic_ar1=False
if self.k_endog_Q == 0 and not self.idiosyncratic_ar1:
block = s.factor_blocks[0]
if init.initialization_type == 'stationary':
Tb = T[block['factors'], block['factors']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
else:
# Factor blocks
for block in s.factor_blocks:
b = tuple(iloc[block['factors']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[block['factors'], block['factors']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
if self.idiosyncratic_ar1:
endog_names = self._get_endog_names(as_string=True)
# Monthly variables
for j in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init_type = init.blocks[(j,)].initialization_type
if init_type == 'stationary':
if not np.abs(T[j, j]) < (1 - 1e-10):
init.set(j, 'diffuse')
name = endog_names[j - s['idio_ar_M'].start]
switch_init.append(
'idiosyncratic AR(1) for monthly'
f' variable: {name}')
# Quarterly variables
if self.k_endog_Q > 0:
b = tuple(iloc[s['idio_ar_Q']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[s['idio_ar_Q'], s['idio_ar_Q']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(s['idio_ar_Q'], 'diffuse')
switch_init.append(
'idiosyncratic AR(1) for the'
' block of quarterly variables')
if len(switch_init) > 0:
warn('Non-stationary parameters found at EM iteration'
f' {i + 1}, which is not compatible with'
' stationary initialization. Initialization was'
' switched to diffuse for the following: '
f' {switch_init}, and fitting was restarted.')
results = self.fit_em(
start_params=params[-1], transformed=transformed,
cov_type=cov_type, cov_kwds=cov_kwds,
maxiter=maxiter, tolerance=tolerance,
em_initialization=em_initialization,
mstep_method=mstep_method, full_output=full_output,
disp=disp, return_params=return_params,
low_memory=low_memory,
llf_decrease_action=llf_decrease_action,
llf_decrease_tolerance=llf_decrease_tolerance)
self.ssm.initialize(self._default_initialization())
return results
# Check for decrease in the log-likelihood
# Note: allow a little numerical error before declaring a decrease
llf_decrease = (
i > 0 and (new_llf - llf[-1]) < -llf_decrease_tolerance)
if llf_decrease_action == 'revert' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1}.'
f' Reverting to the results from EM iteration {i}'
' (prior to the decrease) and returning the solution.')
# Terminated iteration
i -= 1
terminate = True
else:
if llf_decrease_action == 'warn' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1},'
' which can indicate numerical issues.')
llf.append(new_llf)
params.append(out[1])
if em_initialization:
init = initialization.Initialization(
self.k_states, 'known',
constant=out[0].smoothed_state[..., 0],
stationary_cov=out[0].smoothed_state_cov[..., 0])
inits.append(init)
if i > 0:
delta = (2 * np.abs(llf[-1] - llf[-2]) /
(np.abs(llf[-1]) + np.abs(llf[-2])))
else:
delta = np.inf
# If `disp` is not False, display the first iteration
if disp and i == 0:
print(f'EM start iterations, llf={llf[-1]:.5g}')
            # Print output every `disp` iterations
elif disp and ((i + 1) % disp) == 0:
print(f'EM iteration {i + 1}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}')
# Advance the iteration counter
i += 1
# Check for convergence
not_converged = (i == maxiter and delta > tolerance)
# If no convergence without explicit termination, warn users
if not_converged:
warn(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
# If `disp` is not False, display the final iteration
if disp:
if terminate:
print(f'EM terminated at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
elif not_converged:
print(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
else:
print(f'EM converged at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' < tolerance={tolerance:.5g}')
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
if em_initialization:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Note that because we are using params[-1], we are actually using
# the results from one additional iteration compared to the
# iteration at which we declared convergence.
result = self.smooth(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
if em_initialization:
self.ssm.initialization = base_init
# Save the output
if full_output:
llf.append(result.llf)
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i,
'inits': inits})
em_settings = Bunch(**{'method': 'em',
'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result._results.mle_retvals = em_retvals
result._results.mle_settings = em_settings
return result
def _em_iteration(self, params0, init=None, mstep_method=None):
"""EM iteration."""
# (E)xpectation step
res = self._em_expectation_step(params0, init=init)
# (M)aximization step
params1 = self._em_maximization_step(res, params0,
mstep_method=mstep_method)
return res, params1
def _em_expectation_step(self, params0, init=None):
"""EM expectation step."""
# (E)xpectation step
self.update(params0)
# Re-initialize state, if new initialization is given
if init is not None:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Perform smoothing, only saving what is required
res = self.ssm.smooth(
SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_STATE_AUTOCOV,
update_filter=False)
res.llf_obs = np.array(
self.ssm._kalman_filter.loglikelihood, copy=True)
# Reset initialization
if init is not None:
self.ssm.initialization = base_init
return res
def _em_maximization_step(self, res, params0, mstep_method=None):
"""EM maximization step."""
s = self._s
a = res.smoothed_state.T[..., None]
cov_a = res.smoothed_state_cov.transpose(2, 0, 1)
acov_a = res.smoothed_state_autocov.transpose(2, 0, 1)
# E[a_t a_t'], t = 0, ..., T
Eaa = cov_a.copy() + np.matmul(a, a.transpose(0, 2, 1))
# E[a_t a_{t-1}'], t = 1, ..., T
Eaa1 = acov_a[:-1] + np.matmul(a[1:], a[:-1].transpose(0, 2, 1))
# Observation equation
has_missing = np.any(res.nmissing)
if mstep_method is None:
mstep_method = 'missing' if has_missing else 'nonmissing'
mstep_method = mstep_method.lower()
if mstep_method == 'nonmissing' and has_missing:
raise ValueError('Cannot use EM algorithm option'
' `mstep_method="nonmissing"` with missing data.')
if mstep_method == 'nonmissing':
func = self._em_maximization_obs_nonmissing
elif mstep_method == 'missing':
func = self._em_maximization_obs_missing
else:
raise ValueError('Invalid maximization step method: "%s".'
% mstep_method)
        # TODO: computing H is pretty slow
Lambda, H = func(res, Eaa, a, compute_H=(not self.idiosyncratic_ar1))
# Factor VAR and covariance
factor_ar = []
factor_cov = []
for b in s.factor_blocks:
A = Eaa[:-1, b['factors_ar'], b['factors_ar']].sum(axis=0)
B = Eaa1[:, b['factors_L1'], b['factors_ar']].sum(axis=0)
C = Eaa[1:, b['factors_L1'], b['factors_L1']].sum(axis=0)
nobs = Eaa.shape[0] - 1
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
f_A = cho_solve(cho_factor(A), B.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
f_A = np.linalg.solve(A, B.T).T
f_Q = (C - f_A @ B.T) / nobs
factor_ar += f_A.ravel().tolist()
factor_cov += (
np.linalg.cholesky(f_Q)[np.tril_indices_from(f_Q)].tolist())
# Idiosyncratic AR(1) and variances
if self.idiosyncratic_ar1:
ix = s['idio_ar_L1']
Ad = Eaa[:-1, ix, ix].sum(axis=0).diagonal()
Bd = Eaa1[:, ix, ix].sum(axis=0).diagonal()
Cd = Eaa[1:, ix, ix].sum(axis=0).diagonal()
nobs = Eaa.shape[0] - 1
alpha = Bd / Ad
sigma2 = (Cd - alpha * Bd) / nobs
else:
ix = s['idio_ar_L1']
C = Eaa[:, ix, ix].sum(axis=0)
sigma2 = np.r_[H.diagonal()[self._o['M']],
C.diagonal() / Eaa.shape[0]]
# Save parameters
params1 = np.zeros_like(params0)
loadings = []
for i in range(self.k_endog):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
loadings += Lambda[i, factor_ix].tolist()
params1[self._p['loadings']] = loadings
params1[self._p['factor_ar']] = factor_ar
params1[self._p['factor_cov']] = factor_cov
if self.idiosyncratic_ar1:
params1[self._p['idiosyncratic_ar1']] = alpha
params1[self._p['idiosyncratic_var']] = sigma2
return params1
def _em_maximization_obs_nonmissing(self, res, Eaa, a, compute_H=False):
"""EM maximization step, observation equation without missing data."""
s = self._s
dtype = Eaa.dtype
# Observation equation (non-missing)
# Note: we only compute loadings for monthly variables because
# quarterly variables will always have missing entries, so we would
# never choose this method in that case
k = s.k_states_factors
Lambda = np.zeros((self.k_endog, k), dtype=dtype)
for i in range(self.k_endog):
y = self.endog[:, i:i + 1]
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
ix = (np.s_[:],) + np.ix_(factor_ix, factor_ix)
A = Eaa[ix].sum(axis=0)
B = y.T @ a[:, factor_ix, 0]
if self.idiosyncratic_ar1:
ix1 = s.k_states_factors + i
ix2 = ix1 + 1
B -= Eaa[:, ix1:ix2, factor_ix].sum(axis=0)
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
Lambda[i, factor_ix] = cho_solve(cho_factor(A), B.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
Lambda[i, factor_ix] = np.linalg.solve(A, B.T).T
# Compute new obs cov
# Note: this is unnecessary if `idiosyncratic_ar1=True`.
# This is written in a slightly more general way than
# Banbura and Modugno (2014), equation (7); see instead equation (13)
# of Wu et al. (1996)
# "An algorithm for estimating parameters of state-space models"
if compute_H:
Z = self['design'].copy()
Z[:, :k] = Lambda
BL = self.endog.T @ a[..., 0] @ Z.T
C = self.endog.T @ self.endog
H = (C + -BL - BL.T + Z @ Eaa.sum(axis=0) @ Z.T) / self.nobs
else:
H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
return Lambda, H
def _em_maximization_obs_missing(self, res, Eaa, a, compute_H=False):
"""EM maximization step, observation equation with missing data."""
s = self._s
dtype = Eaa.dtype
# Observation equation (missing)
k = s.k_states_factors
Lambda = np.zeros((self.k_endog, k), dtype=dtype)
W = (1 - res.missing.T)
mask = W.astype(bool)
# Compute design for monthly
# Note: the relevant A changes for each i
for i in range(self.k_endog_M):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
m = mask[:, i]
yt = self.endog[m, i:i + 1]
ix = np.ix_(m, factor_ix, factor_ix)
Ai = Eaa[ix].sum(axis=0)
Bi = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
if self.idiosyncratic_ar1:
ix1 = s.k_states_factors + i
ix2 = ix1 + 1
Bi -= Eaa[m, ix1:ix2][..., factor_ix].sum(axis=0)
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
Lambda[i, factor_ix] = cho_solve(cho_factor(Ai), Bi.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
Lambda[i, factor_ix] = np.linalg.solve(Ai, Bi.T).T
# Compute unrestricted design for quarterly
        # See Banbura et al. (2011), where this is described in Appendix C,
# between equations (13) and (14).
if self.k_endog_Q > 0:
# Note: the relevant A changes for each i
multipliers = np.array([1, 2, 3, 2, 1])[:, None]
for i in range(self.k_endog_M, self.k_endog):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1_5_ix'][:, iloc].ravel().tolist()
R, _ = self.loading_constraints(i)
iQ = i - self.k_endog_M
m = mask[:, i]
yt = self.endog[m, i:i + 1]
ix = np.ix_(m, factor_ix, factor_ix)
Ai = Eaa[ix].sum(axis=0)
BiQ = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
if self.idiosyncratic_ar1:
ix = (np.s_[:],) + np.ix_(s['idio_ar_Q_ix'][iQ], factor_ix)
Eepsf = Eaa[ix]
BiQ -= (multipliers * Eepsf[m].sum(axis=0)).sum(axis=0)
# Note that there was a typo in Banbura et al. (2011) for
# the formula applying the restrictions. In their notation,
# they show (C D C')^{-1} while it should be (C D^{-1} C')^{-1}
# Note: in reality, this is:
# unrestricted - Aii @ R.T @ RARi @ (R @ unrestricted - q)
# where the restrictions are defined as: R @ unrestricted = q
# However, here q = 0, so we can simplify.
try:
L_and_lower = cho_factor(Ai)
# x = BQ A^{-1}, or x A = BQ, so solve A' x' = (BQ)'
unrestricted = cho_solve(L_and_lower, BiQ.T).T[0]
AiiRT = cho_solve(L_and_lower, R.T)
L_and_lower = cho_factor(R @ AiiRT)
RAiiRTiR = cho_solve(L_and_lower, R)
restricted = unrestricted - AiiRT @ RAiiRTiR @ unrestricted
except LinAlgError:
# Fall back to slower method if there are problems with
                    # positive-definiteness
Aii = np.linalg.inv(Ai)
unrestricted = (BiQ @ Aii)[0]
RARi = np.linalg.inv(R @ Aii @ R.T)
restricted = (unrestricted -
Aii @ R.T @ RARi @ R @ unrestricted)
Lambda[i, factor_ix] = restricted
# Compute new obs cov
# Note: this is unnecessary if `idiosyncratic_ar1=True`.
# See Banbura and Modugno (2014), equation (12)
# This does not literally follow their formula, e.g. multiplying by the
# W_t selection matrices, because those formulas require loops that are
# relatively slow. The formulation here is vectorized.
if compute_H:
Z = self['design'].copy()
Z[:, :Lambda.shape[1]] = Lambda
y = np.nan_to_num(self.endog)
C = y.T @ y
W = W[..., None]
IW = 1 - W
WL = W * Z
WLT = WL.transpose(0, 2, 1)
BL = y[..., None] @ a.transpose(0, 2, 1) @ WLT
A = Eaa
BLT = BL.transpose(0, 2, 1)
IWT = IW.transpose(0, 2, 1)
H = (C + (-BL - BLT + WL @ A @ WLT +
IW * self['obs_cov'] * IWT).sum(axis=0)) / self.nobs
else:
H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
return Lambda, H
def smooth(self, params, transformed=True, includes_fixed=False,
complex_step=False, cov_type='none', cov_kwds=None,
return_ssm=False, results_class=None,
results_wrapper_class=None, **kwargs):
"""
Kalman smoothing.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : bool, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
See `MLEResults.fit` for a description of covariance matrix types
            for results object. Default is 'none'.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of the
            required keywords for alternative covariance estimators.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
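        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        and `params` is a suitable parameter vector (both hypothetical):
        >>> res = mod.smooth(params)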
"""
return super().smooth(
params, transformed=transformed, includes_fixed=includes_fixed,
complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
return_ssm=return_ssm, results_class=results_class,
results_wrapper_class=results_wrapper_class, **kwargs)
def filter(self, params, transformed=True, includes_fixed=False,
complex_step=False, cov_type='none', cov_kwds=None,
return_ssm=False, results_class=None,
results_wrapper_class=None, low_memory=False, **kwargs):
"""
Kalman filtering.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : bool, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
See `MLEResults.fit` for a description of covariance matrix types
for results object. Default is 'none'.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of the
            required keywords for alternative covariance estimators.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including in-sample prediction), although
out-of-sample forecasting is possible. Default is False.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
"""
return super().filter(
params, transformed=transformed, includes_fixed=includes_fixed,
complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
return_ssm=return_ssm, results_class=results_class,
results_wrapper_class=results_wrapper_class, **kwargs)
def simulate(self, params, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None, anchor=None,
repetitions=None, exog=None, extend_model=None,
extend_kwargs=None, transformed=True, includes_fixed=False,
original_scale=True, **kwargs):
r"""
Simulate a new time series following the state space model.
Parameters
----------
params : array_like
Array of parameters to use in constructing the state space
representation to use when simulating.
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the initial state vector to use in
simulation, which should be shaped (`k_states` x 1), where
`k_states` is the same as in the state space model. If unspecified,
but the model has been initialized, then that initialization is
used. This must be specified if `anchor` is anything other than
"start" or 0 (or else you can use the `simulate` method on a
results object rather than on the model object).
anchor : int, str, or datetime, optional
First period for simulation. The simulation will be conditional on
all existing datapoints prior to the `anchor`. Type depends on the
index of the given `endog` in the model. Two special cases are the
strings 'start' and 'end'. `start` refers to beginning the
simulation at the first period of the sample, and `end` refers to
beginning the simulation at the first period after the sample.
Integer values can run from 0 to `nobs`, or can be negative to
apply negative indexing. Finally, if a date/time index was provided
to the model, then this argument can be a date string to parse or a
datetime type. Default is 'start'.
repetitions : int, optional
Number of simulated paths to generate. Default is 1 simulated path.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
transformed : bool, optional
Whether or not `params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return simulations in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
Returns
-------
simulated_obs : ndarray
An array of simulated observations. If `repetitions=None`, then it
will be shaped (nsimulations x k_endog) or (nsimulations,) if
`k_endog=1`. Otherwise it will be shaped
(nsimulations x k_endog x repetitions). If the model was given
Pandas input then the output will be a Pandas object. If
`k_endog > 1` and `repetitions` is not None, then the output will
be a Pandas DataFrame that has a MultiIndex for the columns, with
the first level containing the names of the `endog` variables and
the second level containing the repetition number.
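        Examples
        --------
        A hedged sketch, assuming `mod` is an existing DynamicFactorMQ model
        and `params` is a fitted parameter vector (both hypothetical):
        >>> sim = mod.simulate(params, nsimulations=24, repetitions=10)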
"""
# Get usual simulations (in the possibly-standardized scale)
sim = super().simulate(
params, nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state,
anchor=anchor, repetitions=repetitions, exog=exog,
extend_model=extend_model, extend_kwargs=extend_kwargs,
transformed=transformed, includes_fixed=includes_fixed, **kwargs)
# If applicable, convert predictions back to original space
if self.standardize and original_scale:
use_pandas = isinstance(self.data, PandasData)
shape = sim.shape
if use_pandas:
# pd.Series (k_endog=1, replications=None)
if len(shape) == 1:
sim = sim * self._endog_std[0] + self._endog_mean[0]
# pd.DataFrame (k_endog > 1, replications=None)
# [or]
# pd.DataFrame with MultiIndex (replications > 0)
elif len(shape) == 2:
sim = (sim.multiply(self._endog_std, axis=1, level=0)
.add(self._endog_mean, axis=1, level=0))
else:
# 1-dim array (k_endog=1, replications=None)
if len(shape) == 1:
sim = sim * self._endog_std + self._endog_mean
# 2-dim array (k_endog > 1, replications=None)
elif len(shape) == 2:
sim = sim * self._endog_std + self._endog_mean
# 3-dim array with MultiIndex (replications > 0)
else:
# Get arrays into the form that can be used for
# broadcasting
std = np.atleast_2d(self._endog_std)[..., None]
mean = np.atleast_2d(self._endog_mean)[..., None]
sim = sim * std + mean
return sim
def impulse_responses(self, params, steps=1, impulse=0,
orthogonalized=False, cumulative=False, anchor=None,
exog=None, extend_model=None, extend_kwargs=None,
transformed=True, includes_fixed=False,
original_scale=True, **kwargs):
"""
Impulse response function.
Parameters
----------
params : array_like
Array of model parameters.
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 1. Note that for time-invariant models, the initial
impulse is not counted as a step, so if `steps=1`, the output will
have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1`. Alternatively, a custom impulse vector may be
provided; must be shaped `k_posdef x 1`.
orthogonalized : bool, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : bool, optional
Whether or not to return cumulative impulse responses. Default is
False.
anchor : int, str, or datetime, optional
Time point within the sample for the state innovation impulse. Type
depends on the index of the given `endog` in the model. Two special
cases are the strings 'start' and 'end', which refer to setting the
impulse at the first and last points of the sample, respectively.
Integer values can run from 0 to `nobs - 1`, or can be negative to
apply negative indexing. Finally, if a date/time index was provided
to the model, then this argument can be a date string to parse or a
datetime type. Default is 'start'.
exog : array_like, optional
            New observations of exogenous regressors for out-of-sample periods,
if applicable.
transformed : bool, optional
Whether or not `params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return impulse responses in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
**kwargs
If the model has time-varying design or transition matrices and the
combination of `anchor` and `steps` implies creating impulse
responses for the out-of-sample period, then these matrices must
have updated values provided for the out-of-sample steps. For
example, if `design` is a time-varying component, `nobs` is 10,
`anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)
matrix must be provided with the new design matrix values.
Returns
-------
impulse_responses : ndarray
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. For a time-invariant model, the
impulse responses are given for `steps + 1` elements (this gives
the "initial impulse" followed by `steps` responses for the
important cases of VAR and SARIMAX models), while for time-varying
models the impulse responses are only given for `steps` elements
(to avoid having to unexpectedly provide updated time-varying
matrices).
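        Examples
        --------
        A minimal illustrative sketch; the monthly dataset ``endog`` is a
        hypothetical placeholder (see the sketch in `simulate` for one way to
        construct it).
        >>> import statsmodels.api as sm
        >>> mod = sm.tsa.DynamicFactorMQ(endog, factors=1, factor_orders=1)
        >>> res = mod.fit(disp=False)
        >>> irfs = mod.impulse_responses(res.params, steps=10)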
"""
# Get usual simulations (in the possibly-standardized scale)
irfs = super().impulse_responses(
params, steps=steps, impulse=impulse,
orthogonalized=orthogonalized, cumulative=cumulative,
anchor=anchor, exog=exog, extend_model=extend_model,
extend_kwargs=extend_kwargs, transformed=transformed,
includes_fixed=includes_fixed, original_scale=original_scale,
**kwargs)
# If applicable, convert predictions back to original space
if self.standardize and original_scale:
use_pandas = isinstance(self.data, PandasData)
shape = irfs.shape
if use_pandas:
# pd.Series (k_endog=1, replications=None)
if len(shape) == 1:
irfs = irfs * self._endog_std[0]
# pd.DataFrame (k_endog > 1)
# [or]
# pd.DataFrame with MultiIndex (replications > 0)
elif len(shape) == 2:
irfs = irfs.multiply(self._endog_std, axis=1, level=0)
else:
# 1-dim array (k_endog=1)
if len(shape) == 1:
irfs = irfs * self._endog_std
# 2-dim array (k_endog > 1)
elif len(shape) == 2:
irfs = irfs * self._endog_std
return irfs
class DynamicFactorMQResults(mlemodel.MLEResults):
"""
Results from fitting a dynamic factor model
"""
def __init__(self, model, params, filter_results, cov_type=None, **kwargs):
super(DynamicFactorMQResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
@property
def factors(self):
"""
Estimates of unobserved factors.
Returns
-------
out : Bunch
            A Bunch instance with the attributes described in the Notes
            section.
Notes
-----
The output is a bunch of the following format:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
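        Examples
        --------
        Illustrative sketch; ``results`` stands for an already-fitted
        ``DynamicFactorMQResults`` instance (its construction is assumed and
        not shown here).
        >>> estimates = results.factors
        >>> estimates.filtered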
"""
out = None
if self.model.k_factors > 0:
iloc = self.model._s.factors_L1
ix = np.array(self.model.state_names)[iloc].tolist()
out = Bunch(
filtered=self.states.filtered.loc[:, ix],
filtered_cov=self.states.filtered_cov.loc[np.s_[ix, :], ix],
smoothed=None, smoothed_cov=None)
if self.smoothed_state is not None:
out.smoothed = self.states.smoothed.loc[:, ix]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.states.smoothed_cov.loc[np.s_[ix, :], ix])
return out
def get_coefficients_of_determination(self, method='individual',
which=None):
"""
Get coefficients of determination (R-squared) for variables / factors.
Parameters
----------
method : {'individual', 'joint', 'cumulative'}, optional
            The type of R-squared values to generate. "individual" computes
            the R-squared of each variable on each factor; "joint" computes
            the R-squared of each variable on each factor that it loads on;
            "cumulative" computes the successive R-squared values as each
            additional factor is added to the regression, for each variable.
Default is 'individual'.
which: {None, 'filtered', 'smoothed'}, optional
Whether to compute R-squared values based on filtered or smoothed
estimates of the factors. Default is 'smoothed' if smoothed results
are available and 'filtered' otherwise.
Returns
-------
rsquared : pd.DataFrame or pd.Series
The R-squared values from regressions of observed variables on
one or more of the factors. If method='individual' or
method='cumulative', this will be a Pandas DataFrame with observed
            variables as the index and factors as the columns. If
            method='joint', this will be a Pandas Series with observed
            variables as the index.
See Also
--------
plot_coefficients_of_determination
coefficients_of_determination
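        Examples
        --------
        Illustrative sketch; ``results`` is assumed to be a fitted
        ``DynamicFactorMQResults`` instance.
        >>> rsq_each = results.get_coefficients_of_determination()
        >>> rsq_joint = results.get_coefficients_of_determination(
        ...     method='joint')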
"""
from statsmodels.tools import add_constant
method = string_like(method, 'method', options=['individual', 'joint',
'cumulative'])
if which is None:
which = 'filtered' if self.smoothed_state is None else 'smoothed'
k_endog = self.model.k_endog
k_factors = self.model.k_factors
ef_map = self.model._s.endog_factor_map
endog_names = self.model.endog_names
factor_names = self.model.factor_names
if method == 'individual':
coefficients = np.zeros((k_endog, k_factors))
for i in range(k_factors):
exog = add_constant(self.factors[which].iloc[:, i])
for j in range(k_endog):
if ef_map.iloc[j, i]:
endog = self.filter_results.endog[j]
coefficients[j, i] = (
OLS(endog, exog, missing='drop').fit().rsquared)
else:
coefficients[j, i] = np.nan
coefficients = pd.DataFrame(coefficients, index=endog_names,
columns=factor_names)
elif method == 'joint':
coefficients = np.zeros((k_endog,))
exog = add_constant(self.factors[which])
for j in range(k_endog):
endog = self.filter_results.endog[j]
ix = np.r_[True, ef_map.iloc[j]].tolist()
X = exog.loc[:, ix]
coefficients[j] = (
OLS(endog, X, missing='drop').fit().rsquared)
coefficients = pd.Series(coefficients, index=endog_names)
elif method == 'cumulative':
coefficients = np.zeros((k_endog, k_factors))
exog = add_constant(self.factors[which])
for j in range(k_endog):
endog = self.filter_results.endog[j]
for i in range(k_factors):
if self.model._s.endog_factor_map.iloc[j, i]:
ix = np.r_[True, ef_map.iloc[j, :i + 1],
[False] * (k_factors - i - 1)]
X = exog.loc[:, ix.astype(bool).tolist()]
coefficients[j, i] = (
OLS(endog, X, missing='drop').fit().rsquared)
else:
coefficients[j, i] = np.nan
coefficients = pd.DataFrame(coefficients, index=endog_names,
columns=factor_names)
return coefficients
@cache_readonly
def coefficients_of_determination(self):
"""
Individual coefficients of determination (:math:`R^2`).
Coefficients of determination (:math:`R^2`) from regressions of
endogenous variables on individual estimated factors.
Returns
-------
coefficients_of_determination : ndarray
A `k_endog` x `k_factors` array, where
`coefficients_of_determination[i, j]` represents the :math:`R^2`
value from a regression of factor `j` and a constant on endogenous
variable `i`.
Notes
-----
Although it can be difficult to interpret the estimated factor loadings
and factors, it is often helpful to use the coefficients of
determination from univariate regressions to assess the importance of
each factor in explaining the variation in each endogenous variable.
In models with many variables and factors, this can sometimes lend
interpretation to the factors (for example sometimes one factor will
load primarily on real variables and another on nominal variables).
See Also
--------
get_coefficients_of_determination
plot_coefficients_of_determination
"""
return self.get_coefficients_of_determination(method='individual')
def plot_coefficients_of_determination(self, method='individual',
which=None, endog_labels=None,
fig=None, figsize=None):
"""
Plot coefficients of determination (R-squared) for variables / factors.
Parameters
----------
method : {'individual', 'joint', 'cumulative'}, optional
The type of R-squared values to generate. "individual" plots
the R-squared of each variable on each factor; "joint" plots the
R-squared of each variable on each factor that it loads on;
"cumulative" plots the successive R-squared values as each
additional factor is added to the regression, for each variable.
Default is 'individual'.
which: {None, 'filtered', 'smoothed'}, optional
Whether to compute R-squared values based on filtered or smoothed
estimates of the factors. Default is 'smoothed' if smoothed results
are available and 'filtered' otherwise.
endog_labels : bool, optional
Whether or not to label the endogenous variables along the x-axis
of the plots. Default is to include labels if there are 5 or fewer
endogenous variables.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
The endogenous variables are arranged along the x-axis according to
their position in the model's `endog` array.
See Also
--------
get_coefficients_of_determination
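        Examples
        --------
        Illustrative sketch; ``results`` is assumed to be a fitted
        ``DynamicFactorMQResults`` instance.
        >>> fig = results.plot_coefficients_of_determination(method='joint')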
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
method = string_like(method, 'method', options=['individual', 'joint',
'cumulative'])
# Should we label endogenous variables?
if endog_labels is None:
endog_labels = self.model.k_endog <= 5
# Plot the coefficients of determination
rsquared = self.get_coefficients_of_determination(method=method,
which=which)
if method in ['individual', 'cumulative']:
plot_idx = 1
for factor_name, coeffs in rsquared.T.iterrows():
# Create the new axis
ax = fig.add_subplot(self.model.k_factors, 1, plot_idx)
ax.set_ylim((0, 1))
ax.set(title=f'{factor_name}', ylabel=r'$R^2$')
coeffs.plot(ax=ax, kind='bar')
if plot_idx < len(rsquared.columns) or not endog_labels:
ax.xaxis.set_ticklabels([])
plot_idx += 1
elif method == 'joint':
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim((0, 1))
ax.set(title=r'$R^2$ - regression on all loaded factors',
ylabel=r'$R^2$')
rsquared.plot(ax=ax, kind='bar')
if not endog_labels:
ax.xaxis.set_ticklabels([])
return fig
def get_prediction(self, start=None, end=None, dynamic=False,
index=None, exog=None, extend_model=None,
extend_kwargs=None, original_scale=True, **kwargs):
"""
In-sample prediction and out-of-sample forecasting.
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return predictions in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : ndarray
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
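        Examples
        --------
        Illustrative sketch; ``results`` is assumed to be a fitted
        ``DynamicFactorMQResults`` instance with roughly 100 in-sample
        observations, so the integer endpoints below are hypothetical.
        >>> pred = results.get_prediction(start=90, end=103)
        >>> mean = pred.predicted_mean
        >>> ci = pred.conf_int(alpha=0.05)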
"""
# Get usual predictions (in the possibly-standardized scale)
res = super().get_prediction(start=start, end=end, dynamic=dynamic,
index=index, exog=exog,
extend_model=extend_model,
extend_kwargs=extend_kwargs, **kwargs)
# If applicable, convert predictions back to original space
if self.model.standardize and original_scale:
prediction_results = res.prediction_results
k_endog, nobs = prediction_results.endog.shape
mean = np.array(self.model._endog_mean)
std = np.array(self.model._endog_std)
if self.model.k_endog > 1:
mean = mean[None, :]
std = std[None, :]
if not prediction_results.results.memory_no_forecast_mean:
res._results._predicted_mean = (
res._results._predicted_mean * std + mean)
if not prediction_results.results.memory_no_forecast_cov:
if k_endog == 1:
res._results._var_pred_mean *= std**2
else:
res._results._var_pred_mean = (
std * res._results._var_pred_mean * std.T)
return res
def news(self, comparison, impact_date=None, impacted_variable=None,
start=None, end=None, periods=None, exog=None,
comparison_type=None, return_raw=False, tolerance=1e-10,
endog_quarterly=None, original_scale=True, **kwargs):
"""
Compute impacts from updated data (news and revisions).
Parameters
----------
comparison : array_like or MLEResults
An updated dataset with updated and/or revised data from which the
news can be computed, or an updated or previous results object
to use in computing the news.
impact_date : int, str, or datetime, optional
A single specific period of impacts from news and revisions to
compute. Can also be a date string to parse or a datetime type.
This argument cannot be used in combination with `start`, `end`, or
`periods`. Default is the first out-of-sample observation.
impacted_variable : str, list, array, or slice, optional
Observation variable label or slice of labels specifying that only
specific impacted variables should be shown in the News output. The
impacted variable(s) describe the variables that were *affected* by
the news. If you do not know the labels for the variables, check
the `endog_names` attribute of the model instance.
start : int, str, or datetime, optional
The first period of impacts from news and revisions to compute.
Can also be a date string to parse or a datetime type. Default is
the first out-of-sample observation.
end : int, str, or datetime, optional
The last period of impacts from news and revisions to compute.
Can also be a date string to parse or a datetime type. Default is
the first out-of-sample observation.
periods : int, optional
The number of periods of impacts from news and revisions to
compute.
exog : array_like, optional
Array of exogenous regressors for the out-of-sample period, if
applicable.
comparison_type : {None, 'previous', 'updated'}
This denotes whether the `comparison` argument represents a
*previous* results object or dataset or an *updated* results object
or dataset. If not specified, then an attempt is made to determine
the comparison type.
return_raw : bool, optional
Whether or not to return only the specific output or a full
results object. Default is to return a full results object.
tolerance : float, optional
The numerical threshold for determining zero impact. Default is
that any impact less than 1e-10 is assumed to be zero.
endog_quarterly : array_like, optional
New observations of quarterly variables, if `comparison` was
provided as an updated monthly dataset. If this argument is
provided, it must be a Pandas Series or DataFrame with a
DatetimeIndex or PeriodIndex at the quarterly frequency.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia
Reichlin.
"Now-casting and the real-time data flow."
In Handbook of economic forecasting, vol. 2, pp. 195-237.
Elsevier, 2013.
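        Examples
        --------
        Illustrative sketch; ``res_previous`` and ``res_updated`` stand for
        results objects fitted on an earlier and a later (hypothetical) data
        vintage, respectively.
        >>> news = res_updated.news(res_previous, comparison_type='previous')
        >>> news.summary()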
"""
news_results = super().news(
comparison, impact_date=impact_date,
impacted_variable=impacted_variable, start=start, end=end,
periods=periods, exog=exog, comparison_type=comparison_type,
return_raw=return_raw, tolerance=tolerance,
endog_quarterly=endog_quarterly, **kwargs)
# If we have standardized the data, we may want to report the news in
# the original scale. If so, we need to modify the data to "undo" the
# standardization.
if not return_raw and self.model.standardize and original_scale:
endog_mean = self.model._endog_mean
endog_std = self.model._endog_std
# Don't need to add in the mean for the impacts, since they are
# the difference of two forecasts
news_results.total_impacts = (
news_results.total_impacts * endog_std)
news_results.update_impacts = (
news_results.update_impacts * endog_std)
if news_results.revision_impacts is not None:
news_results.revision_impacts = (
news_results.revision_impacts * endog_std)
# Update forecasts
for name in ['prev_impacted_forecasts', 'news', 'update_realized',
'update_forecasts', 'post_impacted_forecasts']:
dta = getattr(news_results, name)
# for pd.Series, dta.multiply(...) removes the name attribute;
# save it now so that we can add it back in
orig_name = None
if hasattr(dta, 'name'):
orig_name = dta.name
dta = dta.multiply(endog_std, level=1)
# add back in the name attribute if it was removed
if orig_name is not None:
dta.name = orig_name
if name != 'news':
dta = dta.add(endog_mean, level=1)
setattr(news_results, name, dta)
# For the weights: rows correspond to update (date, variable) and
# columns correspond to the impacted variable.
# 1. Because we have modified the updates (realized, forecasts, and
# forecast errors) to be in the scale of the original updated
# variable, we need to essentially reverse that change for each
# row of the weights by dividing by the standard deviation of
# that row's updated variable
# 2. Because we want the impacts to be in the scale of the original
# impacted variable, we need to multiply each column by the
# standard deviation of that column's impacted variable
news_results.weights = (
news_results.weights.divide(endog_std, axis=0, level=1)
.multiply(endog_std, axis=1, level=1))
return news_results
def append(self, endog, endog_quarterly=None, refit=False, fit_kwargs=None,
copy_initialization=True, retain_standardization=True,
**kwargs):
"""
Recreate the results object with new data appended to original data.
Creates a new result object applied to a dataset that is created by
appending new data to the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, based on the combined dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is True.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results from both the
original dataset and the new dataset.
Notes
-----
The `endog` and `exog` arguments to this method must be formatted in
the same way (e.g. Pandas Series versus Numpy array) as were the
`endog` and `exog` arrays passed to the original model.
The `endog` (and, if applicable, `endog_quarterly`) arguments to this
method should consist of new observations that occurred directly after
the last element of `endog`. For any other kind of dataset, see the
`apply` method.
This method will apply filtering to all of the original data as well
as to the new data. To apply filtering only to the new data (which
can be much faster if the original dataset is large), see the `extend`
method.
See Also
--------
extend
apply
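        Examples
        --------
        Illustrative sketch; ``endog_new`` denotes hypothetical observations
        that immediately follow the model's original sample.
        >>> res_appended = results.append(endog_new, refit=False)
        >>> fcast = res_appended.forecast(steps=3)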
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
raise ValueError('Cannot append data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().append(
endog, refit=refit, fit_kwargs=fit_kwargs,
copy_initialization=copy_initialization,
retain_standardization=retain_standardization, **kwargs)
def extend(self, endog, endog_quarterly=None, fit_kwargs=None,
retain_standardization=True, **kwargs):
"""
Recreate the results object for new data that extends original data.
Creates a new result object applied to a new dataset that is assumed to
follow directly from the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
fit_kwargs : dict, optional
Keyword arguments to pass to `filter` or `smooth`.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
append
apply
Notes
-----
The `endog` argument to this method should consist of new observations
that occurred directly after the last element of the model's original
`endog` array. For any other kind of dataset, see the `apply` method.
This method will apply filtering only to the new data provided by the
`endog` argument, which can be much faster than re-filtering the entire
dataset. However, the returned results object will only have results
for the new data. To retrieve results for both the new data and the
original data, see the `append` method.
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
            raise ValueError('Cannot extend data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().extend(
endog, fit_kwargs=fit_kwargs,
retain_standardization=retain_standardization, **kwargs)
def apply(self, endog, k_endog_monthly=None, endog_quarterly=None,
refit=False, fit_kwargs=None, copy_initialization=False,
retain_standardization=True, **kwargs):
"""
Apply the fitted parameters to new data unrelated to the original data.
Creates a new result object using the current fitted parameters,
applied to a completely new dataset that is assumed to be unrelated to
the model's original data. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is False.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEResults.append
statsmodels.tsa.statespace.mlemodel.MLEResults.apply
Notes
-----
The `endog` argument to this method should consist of new observations
that are not necessarily related to the original model's `endog`
        dataset. For observations that continue the original dataset by
        following directly after its last element, see the `append` and
        `extend` methods.
"""
mod = self.model.clone(endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly,
retain_standardization=retain_standardization,
**kwargs)
if copy_initialization:
res = self.filter_results
init = initialization.Initialization(
self.model.k_states, 'known', constant=res.initial_state,
stationary_cov=res.initial_state_cov)
mod.ssm.initialization = init
res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)
return res
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True, display_diagnostics=False,
display_params_as_list=False, truncate_endog_names=None,
display_max_endog=3):
"""
Summarize the Model.
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title used for the summary table.
model_name : str, optional
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
mod = self.model
# Default title / model name
if title is None:
title = 'Dynamic Factor Results'
if model_name is None:
model_name = self.model._model_name
# Get endog names
endog_names = self.model._get_endog_names(
truncate=truncate_endog_names)
# Get extra elements for top summary table
extra_top_left = None
extra_top_right = []
mle_retvals = getattr(self, 'mle_retvals', None)
mle_settings = getattr(self, 'mle_settings', None)
if mle_settings is not None and mle_settings.method == 'em':
extra_top_right += [('EM Iterations', [f'{mle_retvals.iter}'])]
# Get the basic summary tables
summary = super().summary(
alpha=alpha, start=start, title=title, model_name=model_name,
display_params=(display_params and display_params_as_list),
display_diagnostics=display_diagnostics,
truncate_endog_names=truncate_endog_names,
display_max_endog=display_max_endog,
extra_top_left=extra_top_left, extra_top_right=extra_top_right)
# Get tables of parameters
table_ix = 1
if not display_params_as_list:
# Observation equation table
data = pd.DataFrame(
self.filter_results.design[:, mod._s['factors_L1'], 0],
index=endog_names, columns=mod.factor_names)
data = data.applymap(lambda s: '%.2f' % s)
# Idiosyncratic terms
# data[' '] = ' '
k_idio = 1
if mod.idiosyncratic_ar1:
data[' idiosyncratic: AR(1)'] = (
self.params[mod._p['idiosyncratic_ar1']])
k_idio += 1
data['var.'] = self.params[mod._p['idiosyncratic_var']]
data.iloc[:, -k_idio:] = data.iloc[:, -k_idio:].applymap(
lambda s: '%.2f' % s)
data.index.name = 'Factor loadings:'
# Clear entries for non-loading factors
base_iloc = np.arange(mod.k_factors)
for i in range(mod.k_endog):
iloc = [j for j in base_iloc
if j not in mod._s.endog_factor_iloc[i]]
data.iloc[i, iloc] = '.'
data = data.reset_index()
# Build the table
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = 'Observation equation:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
# Factor transitions
ix1 = 0
ix2 = 0
for i in range(len(mod._s.factor_blocks)):
block = mod._s.factor_blocks[i]
ix2 += block.k_factors
T = self.filter_results.transition
lag_names = []
for j in range(block.factor_order):
lag_names += [f'L{j + 1}.{name}'
for name in block.factor_names]
data = pd.DataFrame(T[block.factors_L1, block.factors_ar, 0],
index=block.factor_names,
columns=lag_names)
data.index.name = ''
data = data.applymap(lambda s: '%.2f' % s)
Q = self.filter_results.state_cov
# data[' '] = ''
if block.k_factors == 1:
data[' error variance'] = Q[ix1, ix1]
else:
data[' error covariance'] = block.factor_names
for j in range(block.k_factors):
data[block.factor_names[j]] = Q[ix1:ix2, ix1 + j]
data.iloc[:, -block.k_factors:] = (
data.iloc[:, -block.k_factors:].applymap(
lambda s: '%.2f' % s))
data = data.reset_index()
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = f'Transition: Factor block {i}'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
ix1 = ix2
return summary
| bsd-3-clause |
tommyjasmin/polar2grid | py/misc/plot_img.py | 1 | 1063 | from numpy import *
from matplotlib import pyplot as plt
from polar2grid.core import Workspace; W=Workspace('.')
import os
import sys
from glob import glob
if "-f" in sys.argv:
fit = True
else:
fit = False
for img_name in glob("image_*") + glob("prescale_DNB*"):
print "Plotting for %s" % img_name
# Get the data and mask it
img_name = img_name.split(".")[0]
img=getattr(W, img_name)
discard = (img <= -999)
data=ma.masked_array(img, discard)
# Plot the data
print data.min(),data.max()
    # Create a new figure every time so things don't get shared
if fit:
fsize = (array(data.shape)/100.0)[::-1]
plt.figure(figsize=fsize, dpi=100)
else:
plt.figure()
plt.imshow(data)
#plt.spectral()
plt.bone()
if fit:
plt.subplots_adjust(left=0, top=1, bottom=0, right=1, wspace=0, hspace=0)
plt.savefig("plot_%s.png" % img_name, dpi=100)
else:
# Add a colorbar and force the colormap
plt.colorbar()
plt.savefig("plot_%s.png" % img_name)
| gpl-3.0 |
ch3ll0v3k/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
SheffieldML/GPy | GPy/examples/non_gaussian.py | 1 | 11410 | # Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import GPy
import numpy as np
MPL_AVAILABLE = True
try:
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
def student_t_approx(optimize=True, plot=True):
"""
Example of regressing with a student t likelihood using Laplace
"""
real_std = 0.1
# Start a function, any function
X = np.linspace(0.0, np.pi * 2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape) * real_std
Y = Y / Y.max()
Yc = Y.copy()
X_full = np.linspace(0.0, np.pi * 2, 500)[:, None]
Y_full = np.sin(X_full)
Y_full = Y_full / Y_full.max()
# Slightly noisy data
Yc[75:80] += 1
# Very noisy data
# Yc[10] += 100
# Yc[25] += 10
# Yc[23] += 10
# Yc[26] += 1000
# Yc[24] += 10
# Yc = Yc/Yc.max()
# Add student t random noise to datapoints
deg_free = 1
print("Real noise: ", real_std)
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
# Kernel object
kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel3 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel4 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
# Gaussian GP model on clean data
m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
# optimize
m1[".*white"].constrain_fixed(1e-5)
m1.randomize()
# Gaussian GP model on corrupt data
m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
m2[".*white"].constrain_fixed(1e-5)
m2.randomize()
# Student t GP model on clean data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m3 = GPy.core.GP(
X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf
)
m3[".*t_scale2"].constrain_bounded(1e-6, 10.0)
m3[".*white"].constrain_fixed(1e-5)
m3.randomize()
# Student t GP model on corrupt data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m4 = GPy.core.GP(
X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf
)
m4[".*t_scale2"].constrain_bounded(1e-6, 10.0)
m4[".*white"].constrain_fixed(1e-5)
m4.randomize()
print(m4)
debug = True
if debug:
m4.optimize(messages=1)
from matplotlib import pyplot as pb
pb.plot(m4.X, m4.inference_method.f_hat)
pb.plot(m4.X, m4.Y, "rx")
m4.plot()
print(m4)
return m4
if optimize:
optimizer = "scg"
print("Clean Gaussian")
m1.optimize(optimizer, messages=1)
print("Corrupt Gaussian")
m2.optimize(optimizer, messages=1)
print("Clean student t")
m3.optimize(optimizer, messages=1)
print("Corrupt student t")
m4.optimize(optimizer, messages=1)
if MPL_AVAILABLE and plot:
plt.figure(1)
plt.suptitle("Gaussian likelihood")
ax = plt.subplot(211)
m1.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title("Gaussian clean")
ax = plt.subplot(212)
m2.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title("Gaussian corrupt")
plt.figure(2)
plt.suptitle("Student-t likelihood")
ax = plt.subplot(211)
m3.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title("Student-t rasm clean")
ax = plt.subplot(212)
m4.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title("Student-t rasm corrupt")
return m1, m2, m3, m4
def boston_example(optimize=True, plot=True):
raise NotImplementedError("Needs updating")
import sklearn
from sklearn.cross_validation import KFold
optimizer = "bfgs"
messages = 0
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.boston_housing()
degrees_freedoms = [3, 5, 8, 10]
X = data["X"].copy()
Y = data["Y"].copy()
X = X - X.mean(axis=0)
X = X / X.std(axis=0)
Y = Y - Y.mean()
Y = Y / Y.std()
num_folds = 10
kf = KFold(len(Y), n_folds=num_folds, indices=True)
num_models = (
len(degrees_freedoms) + 3
) # 3 for baseline, gaussian, gaussian laplace approx
score_folds = np.zeros((num_models, num_folds))
pred_density = score_folds.copy()
def rmse(Y, Ystar):
return np.sqrt(np.mean((Y - Ystar) ** 2))
for n, (train, test) in enumerate(kf):
X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
print("Fold {}".format(n))
noise = 1e-1 # np.exp(-2)
rbf_len = 0.5
data_axis_plot = 4
kernelstu = (
GPy.kern.RBF(X.shape[1])
+ GPy.kern.white(X.shape[1])
+ GPy.kern.bias(X.shape[1])
)
kernelgp = (
GPy.kern.RBF(X.shape[1])
+ GPy.kern.white(X.shape[1])
+ GPy.kern.bias(X.shape[1])
)
# Baseline
score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
# Gaussian GP
print("Gauss GP")
mgp = GPy.models.GPRegression(
X_train.copy(), Y_train.copy(), kernel=kernelgp.copy()
)
mgp.constrain_fixed(".*white", 1e-5)
mgp[".*len"] = rbf_len
mgp[".*noise"] = noise
print(mgp)
if optimize:
mgp.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mgp.predict(X_test)
score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
print(mgp)
print(pred_density)
print("Gaussian Laplace GP")
N, D = Y_train.shape
g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(
variance=noise, N=N, D=D
)
g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
mg = GPy.models.GPRegression(
X_train.copy(),
Y_train.copy(),
kernel=kernelstu.copy(),
likelihood=g_likelihood,
)
mg.constrain_positive("noise_variance")
mg.constrain_fixed(".*white", 1e-5)
mg["rbf_len"] = rbf_len
mg["noise"] = noise
print(mg)
if optimize:
mg.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mg.predict(X_test)
score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
print(pred_density)
print(mg)
for stu_num, df in enumerate(degrees_freedoms):
# Student T
print("Student-T GP {}df".format(df))
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(
deg_free=df, sigma2=noise
)
stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
mstu_t = GPy.models.GPRegression(
X_train.copy(),
Y_train.copy(),
kernel=kernelstu.copy(),
likelihood=stu_t_likelihood,
)
mstu_t.constrain_fixed(".*white", 1e-5)
mstu_t.constrain_bounded(".*t_scale2", 0.0001, 1000)
mstu_t["rbf_len"] = rbf_len
mstu_t[".*t_scale2"] = noise
print(mstu_t)
if optimize:
mstu_t.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mstu_t.predict(X_test)
score_folds[3 + stu_num, n] = rmse(Y_test, Y_test_pred[0])
pred_density[3 + stu_num, n] = np.mean(
mstu_t.log_predictive_density(X_test, Y_test)
)
print(pred_density)
print(mstu_t)
if MPL_AVAILABLE and plot:
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c="r", marker="x")
plt.title("GP gauss")
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c="r", marker="x")
plt.title("Lap gauss")
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c="r", marker="x")
plt.title("Stu t {}df".format(df))
print("Average scores: {}".format(np.mean(score_folds, 1)))
print("Average pred density: {}".format(np.mean(pred_density, 1)))
if MPL_AVAILABLE and plot:
# Plotting
stu_t_legends = ["Student T, df={}".format(df) for df in degrees_freedoms]
legends = ["Baseline", "Gaussian", "Laplace Approx Gaussian"] + stu_t_legends
# Plot boxplots for RMSE density
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title("RMSE")
bp = ax.boxplot(score_folds.T, notch=0, sym="+", vert=1, whis=1.5)
plt.setp(bp["boxes"], color="black")
plt.setp(bp["whiskers"], color="black")
plt.setp(bp["fliers"], color="red", marker="+")
xtickNames = plt.setp(ax, xticklabels=legends)
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel("RMSE")
ax.set_xlabel("Distribution")
# Make grid and put it below boxes
ax.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5)
ax.set_axisbelow(True)
# Plot boxplots for predictive density
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title("Predictive density")
bp = ax.boxplot(pred_density[1:, :].T, notch=0, sym="+", vert=1, whis=1.5)
plt.setp(bp["boxes"], color="black")
plt.setp(bp["whiskers"], color="black")
plt.setp(bp["fliers"], color="red", marker="+")
xtickNames = plt.setp(ax, xticklabels=legends[1:])
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel("Mean Log probability P(Y*|Y)")
ax.set_xlabel("Distribution")
# Make grid and put it below boxes
ax.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5)
ax.set_axisbelow(True)
return mstu_t
# def precipitation_example():
# import sklearn
# from sklearn.cross_validation import KFold
# data = datasets.boston_housing()
# X = data["X"].copy()
# Y = data["Y"].copy()
# X = X - X.mean(axis=0)
# X = X / X.std(axis=0)
# Y = Y - Y.mean()
# Y = Y / Y.std()
# import ipdb
# ipdb.set_trace() # XXX BREAKPOINT
# num_folds = 10
# kf = KFold(len(Y), n_folds=num_folds, indices=True)
# score_folds = np.zeros((4, num_folds))
# def rmse(Y, Ystar):
# return np.sqrt(np.mean((Y - Ystar) ** 2))
# for train, test in kf:
# for n, (train, test) in enumerate(kf):
# X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
# print("Fold {}".format(n))
| bsd-3-clause |
bthirion/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 12 | 4111 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric='precomputed')
with assert_raises(ValueError) as context:
clf.fit(X, y)
assert_equal(ValueError, type(context.exception))
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
rs2/pandas | pandas/core/internals/__init__.py | 2 | 1035 | from pandas.core.internals.blocks import ( # io.pytables, io.packers
Block,
BoolBlock,
CategoricalBlock,
ComplexBlock,
DatetimeBlock,
DatetimeTZBlock,
ExtensionBlock,
FloatBlock,
IntBlock,
ObjectBlock,
TimeDeltaBlock,
make_block,
safe_reshape,
)
from pandas.core.internals.concat import concatenate_block_managers
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
__all__ = [
"Block",
"BoolBlock",
"CategoricalBlock",
"ComplexBlock",
"DatetimeBlock",
"DatetimeTZBlock",
"ExtensionBlock",
"FloatBlock",
"IntBlock",
"ObjectBlock",
"TimeDeltaBlock",
"safe_reshape",
"make_block",
"BlockManager",
"SingleBlockManager",
"concatenate_block_managers",
# those two are preserved here for downstream compatibility (GH-33892)
"create_block_manager_from_arrays",
"create_block_manager_from_blocks",
]
| bsd-3-clause |
sheqi/TVpgGLM | utils/plot_networks.py | 1 | 2646 | # From https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY
# Plot weighted directed positive/negative network graph
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Circle
import numpy as np
def draw_curvy_network(G, pos, ax, node_radius=0.02, node_color='b', node_edge_color='b', node_alpha=0.5, edge_color=None, edge_alpha=0.5, edge_width=None):
assert isinstance(G, nx.Graph), "G must be a NetworkX graph!"
# Convert node colors to lists
def _to_list(x, N):
if isinstance(x, list):
assert len(x) == N
return x
else:
return [x] * N
node_radius = _to_list(node_radius, len(G.nodes()))
node_color = _to_list(node_color, len(G.nodes()))
node_edge_color = _to_list(node_edge_color, len(G.nodes()))
node_alpha = _to_list(node_alpha, len(G.nodes()))
if edge_color is None:
edge_color = _to_list('k', len(G.edges()))
edge_alpha = _to_list(edge_alpha, len(G.edges()))
    # Use a default width unless the user supplied per-edge widths
if edge_width is None:
edge_width = 2
edge_width = _to_list(edge_width, len(G.edges()))
# Plot the nodes
for n, r, a, fc, ec in zip(G, node_radius, node_alpha, node_color, node_edge_color):
c = Circle(pos[n], radius=r, alpha=a, fc=fc, ec=ec)
ax.add_patch(c)
G.node[n]['patch'] = c
# Plot the edges
seen = {}
for (u, v, d), a, lw, ec in zip(G.edges(data=True), edge_alpha, edge_width, edge_color):
n1 = G.node[u]['patch']
n2 = G.node[v]['patch']
rad = -0.1
if (u, v) in seen:
rad = seen.get((u, v))
rad = (rad + np.sign(rad) * 0.1) * -1
e = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle='-|>',
connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw, alpha=a, color=ec)
seen[(u, v)] = rad
ax.add_patch(e)
return e
if __name__ == "__main__":
from hips.plotting.colormaps import harvard_colors
color = harvard_colors()[0:10]
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)])
pos = nx.spring_layout(G)
ax = plt.gca()
edge_width = [5, 0.9, 0.8, 2, 2, 1, 5]
edge_color = [color[0], color[0], color[0], color[0], color[1], color[1], color[1]]
draw_curvy_network(G, pos, ax, node_color='k', node_edge_color='k', edge_width=edge_width, edge_color=edge_color)
ax.autoscale()
plt.axis('equal')
plt.axis('off')
# plt.savefig("graph.pdf")
plt.show()
| mit |
EvanzzzZ/mxnet | example/speech_recognition/stt_utils.py | 11 | 5031 | import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
        border_mode (str): Only supports `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
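# Minimal usage sketch; "sample.wav" is a hypothetical path to an existing
# mono WAV file, and the step/window values simply mirror the defaults above.
if __name__ == "__main__":
    # Compute a log-linear spectrogram (cached to sample.csv on first use)
    features = spectrogram_from_file("sample.wav", step=10, window=20)
    print(features.shape)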
| apache-2.0 |
snurkabill/pydeeplearn | code/similarity/similarityMain.py | 3 | 29514 | """ Let's run some experiments with the similarity networks."""
__author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import argparse
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
import sys
# We need this to import other modules
sys.path.append("..")
from similarityUtils import *
from lib.activationfunctions import *
from read.readfacedatabases import *
import similarity
parser = argparse.ArgumentParser(description='digit recognition')
parser.add_argument('--relu', dest='relu',action='store_true', default=False,
help=("if true, trains the RBM with a rectified linear units"))
parser.add_argument('--sparsity', dest='sparsity',action='store_true', default=False,
help=("if true, trains the RBM with a sparsity constraint"))
parser.add_argument('--rmsprop', dest='rmsprop',action='store_true', default=False,
help=("if true, trains the similarity net is "))
parser.add_argument('--cv', dest='cv',action='store_true', default=False,
help=("if true, does cv"))
parser.add_argument('--cvEmotion', dest='cvEmotion',action='store_true', default=False,
help=("if true, does cv for emotions"))
parser.add_argument('--testYaleMain', dest='testYaleMain',action='store_true', default=False,
help=("if true, tests the net with the Kanade databse"))
parser.add_argument('--diffsubjects', dest='diffsubjects',action='store_true', default=False,
help=("if true, trains a net with different test and train subjects"))
parser.add_argument('--emotionsdiff', dest='emotionsdiff',action='store_true', default=False,
help=("if true, trains a net to distinguish between emotions"))
parser.add_argument('--emotionsdiffsamesubj', dest='emotionsdiffsamesubj',action='store_true', default=False,
help=("if true, trains a net to distinguish between emotions where the pictures presented are the same people"))
parser.add_argument('--emotionssim', dest='emotionssim',action='store_true', default=False,
help=("if true, trains a network to distinguish between subjects and then looks at the reported similarities depending on the emotion"))
parser.add_argument('--equalize',dest='equalize',action='store_true', default=False,
help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--nrHidden',dest='nrHidden', type=int, default=1000,
help="how many hidden units should be used for the net")
parser.add_argument('--epochs', type=int, default=500,
help='the maximum number of supervised epochs')
parser.add_argument('--rbmepochs', type=int, default=10,
help='the maximum number of unsupervised epochs')
args = parser.parse_args()
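# Example invocations (illustrative only; the flag combinations below are
# assumptions, not recommended settings):
#   python similarityMain.py --relu --nrHidden 1000 --epochs 100
#   python similarityMain.py --cv --equalize --rbmepochs 10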
def similarityMain():
trainData1, trainData2, testData1, testData2, similaritiesTrain, similaritiesTest =\
splitDataMultiPIESubject(instanceToPairRatio=2, equalize=args.equalize)
print "training with dataset of size ", len(trainData1)
print len(trainData1)
print "testing with dataset of size ", len(testData1)
print "training with ", similaritiesTrain.sum(), "positive examples"
print "training with ", len(similaritiesTrain) - similaritiesTrain.sum(), "negative examples"
print "testing with ", similaritiesTest.sum(), "positive examples"
print "testing with ", len(similaritiesTest) - similaritiesTest.sum(), "negative examples"
print len(testData1)
trainData1, trainData2, similaritiesTrain = shuffle(trainData1, trainData2, similaritiesTrain)
testData1, testData2, similaritiesTest = shuffle(testData1, testData2, similaritiesTest)
if args.relu:
    # Rmsprop does not work well with relu here, so we do not provide rmsprop values
learningRate = 0.005
rbmLearningRate = 0.0005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
# Stochastic binary units
else:
if args.rmsprop:
learningRate = 0.005
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
rbmDropoutVis=1.0,
rmsprop=False,
momentumFactorForLearningRateRBM=False,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.001,
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, similaritiesTrain, epochs=args.epochs)
res = simNet.test(testData1, testData2)
# Try to change this threshold?
predicted = res > 0.5
correct = (similaritiesTest == predicted).sum() * 1.0 / len(res)
confMatrix = confusion_matrix(similaritiesTest, predicted)
print confMatrix
print "correct"
print correct
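# Illustrative sketch (hypothetical helper, not part of the original script):
# sweep the decision threshold instead of hard-coding 0.5 and return the best
# accuracy on the held-out pairs together with the threshold that achieved it.
def _bestThreshold(res, similaritiesTest):
  best = (0.0, None)
  for t in [0.3, 0.4, 0.5, 0.6, 0.7]:
    correct = (similaritiesTest == (res > t)).sum() * 1.0 / len(res)
    if correct > best[0]:
      best = (correct, t)
  return best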
def similarityMainTestYale():
subjectsToImgs = readMultiPIESubjects(args.equalize)
trainData1, trainData2, trainSubjects1, trainSubjects2 =\
splitDataAccordingToLabels(subjectsToImgs, None, instanceToPairRatio=2)
similaritiesTrain = similarityDifferentLabels(trainSubjects1, trainSubjects2)
testData1, testData2, similaritiesTest = splitSimilarityYale(1, args.equalize)
trainData1, trainData2, similaritiesTrain = shuffle(trainData1, trainData2, similaritiesTrain)
testData1, testData2, similaritiesTest = shuffle(testData1, testData2, similaritiesTest)
print "training with dataset of size ", len(trainData1)
print len(trainData1)
print "testing with dataset of size ", len(testData1)
print len(testData1)
print "training with ", similaritiesTrain.sum(), "positive examples"
print "training with ", len(similaritiesTrain) - similaritiesTrain.sum(), "negative examples"
print "testing with ", similaritiesTest.sum(), "positive examples"
print "testing with ", len(similaritiesTest) - similaritiesTest.sum(), "negative examples"
if args.relu:
if args.rmsprop:
learningRate = 0.005
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.005
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
else:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
rbmDropoutVis=1.0,
rmsprop=False,
momentumFactorForLearningRateRBM=True,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.01,
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, similaritiesTrain, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
correct = (similaritiesTest == predicted).sum() * 1.0 / len(res)
confMatrix = confusion_matrix(similaritiesTest, predicted)
print confMatrix
print correct
def similarityDifferentSubjectsMain():
nrSubjects = 147
subjects = np.array(range(nrSubjects))
kf = cross_validation.KFold(n=len(subjects), n_folds=5)
for train, test in kf:
break
subjectsToImgs = readMultiPIESubjects(args.equalize)
subjectTrain = subjects[train]
subjectTest = subjects[test]
print "len(subjectTrain)"
print len(subjectTrain)
print "len(subjectTest)"
print len(subjectTest)
trainData1, trainData2, trainSubjects1, trainSubjects2 =\
splitDataAccordingToLabels(subjectsToImgs, subjectTrain, instanceToPairRatio=2)
testData1, testData2, testSubjects1, testSubjects2 =\
splitDataAccordingToLabels(subjectsToImgs, subjectTest, instanceToPairRatio=2)
print "training with dataset of size ", len(trainData1)
print "testing with dataset of size ", len(testData1)
similaritiesTrain = similarityDifferentLabels(trainSubjects1, trainSubjects2)
similaritiesTest = similarityDifferentLabels(testSubjects1, testSubjects2)
print "training with ", similaritiesTrain.sum(), "positive examples"
print "training with ", len(similaritiesTrain) - similaritiesTrain.sum(), "negative examples"
print "testing with ", similaritiesTest.sum(), "positive examples"
print "testing with ", len(similaritiesTest) - similaritiesTest.sum(), "negative examples"
trainData1, trainData2, similaritiesTrain = shuffle(trainData1, trainData2, similaritiesTrain)
testData1, testData2, similaritiesTest = shuffle(testData1, testData2, similaritiesTest)
# for i in xrange(10):
# plt.imshow(trainData1[i].reshape((40, 30)), cmap=plt.cm.gray)
# plt.show()
# plt.imshow(trainData2[i].reshape((40, 30)), cmap=plt.cm.gray)
# plt.show()
# print similaritiesTrain[i]
# for i in xrange(10):
# plt.imshow(testData1[i].reshape((40, 30)), cmap=plt.cm.gray)
# plt.show()
# plt.imshow(testData2[i].reshape((40, 30)), cmap=plt.cm.gray)
# plt.show()
# print similaritiesTest[i]
if args.relu:
learningRate = 0.005
rbmLearningRate = 0.0005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
else:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
momentumFactorForLearningRateRBM=False,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
rmsprop=False,
rbmDropoutVis=1.0,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.001,
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, similaritiesTrain, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
correct = (similaritiesTest == predicted).sum() * 1.0 / len(res)
confMatrix = confusion_matrix(similaritiesTest, predicted)
print confMatrix
print correct
def similarityCV():
trainData1, trainData2, testData1, testData2, similaritiesTrain, similaritiesTest =\
splitDataMultiPIESubject(instanceToPairRatio=2, equalize=args.equalize)
if args.relu:
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
else:
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
if args.relu:
if not args.sparsity:
# params = [(0.001, 0.005), (0.001, 0.001), (0.005, 0.001), (0.005, 0.005),
# (0.0001, 0.0005), (0.0001, 0.0001), (0.0005, 0.0001), (0.005, 0.0005),
# (0.001, 0.005), (0.001, 0.001), (0.005, 0.001), (0.005, 0.005)]
      params = [(x, y) for x in [0.01, 0.005, 0.001, 0.005, 0.0005] for y in [0.01, 0.05, 0.001, 0.005, 0.0005]]
# (0.001, 0.005), (0.001, 0.001), (0.005, 0.001), (0.005, 0.005),
# (0.001, 0.005), (0.001, 0.001), (0.005, 0.001), (0.005, 0.005)]
# params = [(0.001, 0.005, 0.1), (0.001, 0.001, 0.1), (0.005, 0.001, 0.001), (0.005, 0.005, 0.1),
# (0.001, 0.005, 0.01), (0.001, 0.001, 0.01), (0.005, 0.001, 0.001), (0.005, 0.005, 0.01),
# (0.001, 0.005, 0.001), (0.001, 0.001, 0.001), (0.005, 0.001, 0.001), (0.005, 0.005, 0.001),
# (0.001, 0.005, 0.0001), (0.001, 0.001, 0.0001), (0.005, 0.001, 0.001), (0.005, 0.005, 0.0001)]
else:
params = [(0.001, 0.005, 0.1), (0.001, 0.001, 0.01), (0.005, 0.001, 0.001), (0.005, 0.005, 0.0001),
(0.001, 0.005, 0.1), (0.001, 0.001, 0.01), (0.005, 0.001, 0.001), (0.005, 0.005, 0.0001),
(0.001, 0.005, 0.1), (0.001, 0.001, 0.01), (0.005, 0.001, 0.001), (0.005, 0.005, 0.0001)]
else:
if args.rmsprop:
params = [(0.0001, 0.01, 0.01), (0.0001, 0.005, 0.01), (0.001, 0.01, 0.01), (0.001, 0.005, 0.01)]
else:
params = [(0.01, 0.01), (0.01, 0.005), (0.0001, 0.05), (0.001, 0.005)]
kf = cross_validation.KFold(n=len(trainData1), n_folds=len(params))
correctForParams = []
# Try bigger values for the number of units: 2000?
fold = 0
for train, test in kf:
simNet = similarity.SimilarityNet(learningRate=params[fold][0],
maxMomentum=0.95,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
momentumFactorForLearningRateRBM=False,
rbmNrHid=args.nrHidden,
rbmLearningRate=params[fold][1],
rbmDropoutHid=1.0,
rmsprop=False,
rbmDropoutVis=1.0,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=params[fold][-1],
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, similaritiesTrain, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
print "predicted"
print predicted
correct = (similaritiesTest == predicted).sum() * 1.0 / len(res)
print "params[fold]"
print params[fold]
print "correct"
print correct
correctForParams += [correct]
fold += 1
for i in xrange(len(params)):
print "parameter tuple " + str(params[i]) + " achieved correctness of " + str(correctForParams[i])
def similarityCVEmotions():
data1, data2, labels = splitSimilaritiesPIE(instanceToPairRatio=2, equalize=args.equalize)
if args.relu:
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
    # Scaling is now done at the RBM level, so it is not as important to do it here anymore
data1 = scale(data1)
data2 = scale(data2)
else:
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
if args.relu:
if args.rmsprop:
# params = [(0.001, 0.01), (0.001, 0.005), (0.001, 0.1), (0.001, 0.05)]
params = [ (0.005, 0.01), (0.005, 0.005), (0.005, 0.05)]
else:
params = [(0.001, 0.01), (0.001, 0.005), (0.001, 0.1), (0.001, 0.05)]
else:
if args.rmsprop:
params = [(0.0001, 0.01), (0.0001, 0.005), (0.001, 0.01), (0.001, 0.005)]
else:
params = [(0.0001, 0.01), (0.0001, 0.005), (0.001, 0.01), (0.001, 0.005)]
kf = cross_validation.KFold(n=len(data1), n_folds=len(params))
correctForParams = []
# Try bigger values for the number of units: 2000?
fold = 0
for train, test in kf:
trainData1 = data1[train]
trainData2 = data2[train]
trainLabels = labels[train]
testData1 = data1[test]
testData2 = data2[test]
testLabels = labels[test]
simNet = similarity.SimilarityNet(learningRate=params[fold][0],
maxMomentum=0.95,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
rbmLearningRate=params[fold][1],
rbmDropoutHid=1.0,
rbmDropoutVis=1.0,
momentumFactorForLearningRateRBM=True,
rmsprop=False,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=params[fold][-1],
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, trainLabels, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
print "predicted"
print predicted
correct = (testLabels == predicted).sum() * 1.0 / len(res)
print "params[fold]"
print params[fold]
print "correct"
print correct
correctForParams += [correct]
fold += 1
for i in xrange(len(params)):
print "parameter tuple " + str(params[i]) + " achieved correctness of " + str(correctForParams[i])
def similarityEmotionsMain():
trainData1, trainData2, trainLabels, testData1, testData2, testLabels =\
splitSimilaritiesPIEEmotions(instanceToPairRatio=2, equalize=args.equalize)
print "training with dataset of size ", len(trainData1)
print len(trainData1)
print "testing with dataset of size ", len(testData1)
print len(testData1)
if args.relu:
if args.rmsprop:
learningRate = 0.005
rbmLearningRate = 0.01
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
else:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
momentumFactorForLearningRateRBM=True,
rbmNrHid=args.nrHidden,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
rbmDropoutVis=1.0,
rmsprop=False,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.001,
sparsityTraget=0.01)
print "training with ", trainLabels.sum(), "positive examples"
print "training with ", len(trainLabels) - trainLabels.sum(), "negative examples"
print "testing with ", testLabels.sum(), "positive examples"
print "testing with ", len(testLabels) - testLabels.sum(), "negative examples"
final = []
for i in xrange(len(trainData1)):
if i > 6:
break
    # Stack the two 40x30 images of the pair vertically
res = np.vstack([trainData1[i].reshape(40,30), trainData2[i].reshape(40,30)])
final += [res]
final = np.hstack(final)
plt.imshow(final, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
simNet.train(trainData1, trainData2, trainLabels, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
correct = (testLabels == predicted).sum() * 1.0 / len(res)
confMatrix = confusion_matrix(testLabels, predicted)
print confMatrix
print correct
def similarityEmotionsSameSubject():
trainData1, trainData2, trainLabels, testData1, testData2, testLabels =\
splitEmotionsMultiPieKeepSubjectsTestTrain(instanceToPairRatio=2, equalize=args.equalize)
print "training with dataset of size ", len(trainData1)
print len(trainData1)
print "testing with dataset of size ", len(testData1)
print len(testData1)
if args.relu:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
else:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
momentumFactorForLearningRateRBM=True,
rbmDropoutVis=1.0,
rmsprop=False,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.001,
sparsityTraget=0.01)
print "training with ", trainLabels.sum(), "positive examples"
print "training with ", len(trainLabels) - trainLabels.sum(), "negative examples"
print "testing with ", testLabels.sum(), "positive examples"
print "testing with ", len(testLabels) - testLabels.sum(), "negative examples"
simNet.train(trainData1, trainData2, trainLabels, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
correct = (testLabels == predicted).sum() * 1.0 / len(res)
confMatrix = confusion_matrix(testLabels, predicted)
print confMatrix
print correct
def similaritySameSubjectDifferentEmotionsValues():
emotions = [0, 3, 5]
trainData1, trainData2, similaritiesTrain, testData1, testData2, pairs = splitForSimilaritySameSubjectsDifferentEmotions(args.equalize,
emotions, perSubject=2)
# print "pairs"
# print pairs
print "training with dataset of size ", len(trainData1)
print len(trainData1)
print "testing with dataset of size ", len(testData1)
print "training with ", similaritiesTrain.sum(), "positive examples"
print "training with ", len(similaritiesTrain) - similaritiesTrain.sum(), "negative examples"
if args.relu:
if args.rmsprop:
learningRate = 0.005
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.005
rbmLearningRate = 0.005
maxMomentum = 0.95
visibleActivationFunction = Identity()
hiddenActivationFunction = RectifiedNoisy()
    # IMPORTANT: SCALE THE DATA IF YOU USE GAUSSIAN VISIBLE UNITS
testData1 = scale(testData1)
testData2 = scale(testData2)
trainData1 = scale(trainData1)
trainData2 = scale(trainData2)
# Stochastic binary units
else:
if args.rmsprop:
learningRate = 0.001
rbmLearningRate = 0.005
maxMomentum = 0.95
else:
learningRate = 0.001
rbmLearningRate = 0.05
maxMomentum = 0.95
visibleActivationFunction = Sigmoid()
hiddenActivationFunction = Sigmoid()
simNet = similarity.SimilarityNet(learningRate=learningRate,
maxMomentum=maxMomentum,
visibleActivationFunction=visibleActivationFunction,
hiddenActivationFunction=hiddenActivationFunction,
rbmNrVis=1200,
rbmNrHid=args.nrHidden,
rbmLearningRate=rbmLearningRate,
rbmDropoutHid=1.0,
rbmDropoutVis=1.0,
rmsprop=False,
trainingEpochsRBM=args.rbmepochs,
nesterovRbm=True,
momentumFactorForLearningRateRBM=True,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.01,
sparsityTraget=0.01)
simNet.train(trainData1, trainData2, similaritiesTrain, epochs=args.epochs)
res = simNet.test(testData1, testData2)
predicted = res > 0.5
correct = (predicted == 1.0).sum() * 1.0 / len(predicted)
  # make all emotion pairs
  emotionPairs = [(x, y) for x in emotions for y in emotions]
  for pair in emotionPairs:
print pair
indices = (pairs == np.array(pair))
indices = np.all(indices, axis=1)
print "indices"
print indices.sum()
print np.mean(res[indices])
print correct
def main():
if args.cv:
similarityCV()
elif args.cvEmotion:
similarityCVEmotions()
elif args.diffsubjects:
similarityDifferentSubjectsMain()
elif args.testYaleMain:
similarityMainTestYale()
elif args.emotionsdiff:
similarityEmotionsMain()
elif args.emotionsdiffsamesubj:
similarityEmotionsSameSubject()
elif args.emotionssim:
similaritySameSubjectDifferentEmotionsValues()
else:
similarityMain()
if __name__ == '__main__':
main()
| bsd-3-clause |
rahuldhote/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/frame/test_analytics.py | 1 | 86626 | # -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
    # make sure it works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns and some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
class NonzeroFail(object):
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics():
    # ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = nan
float_frame['B'][5:10] = nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = nan
float_frame['B'][5:10] = nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = nan
float_frame['B'][5:10] = nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = nan
float_frame['B'][5:10] = nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
        # so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', 'spearman', "
"or 'kendall'")
with tm.assert_raises_regex(ValueError, msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = nan
float_frame['B'][:10] = nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self, float_frame_with_na, float_frame, float_string_frame):
f = lambda s: notna(s).sum()
assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False,
check_dtype=False, check_dates=True)
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH 423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_nunique(self, float_frame_with_na, float_frame,
float_string_frame):
f = lambda s: len(algorithms.unique1d(s.dropna()))
assert_stat_op_calc('nunique', f, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
def test_sum(self, float_frame_with_na, mixed_float_frame,
float_frame, float_string_frame):
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH 676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
def test_mean(self, float_frame_with_na, float_frame, float_string_frame):
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_api('mean', float_frame, float_string_frame)
def test_product(self, float_frame_with_na, float_frame,
float_string_frame):
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_api('product', float_frame, float_string_frame)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, float_frame,
float_string_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_api('median', float_frame, float_string_frame)
def test_min(self, float_frame_with_na, int_frame,
float_frame, float_string_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
assert_stat_op_calc('min', np.min, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('min', np.min, int_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = nan
datetime_frame.loc[10:15, 1] = nan
datetime_frame.loc[15:, 2] = nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = nan
datetime_frame.loc[10:15, 1] = nan
datetime_frame.loc[15:, 2] = nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
def test_max(self, float_frame_with_na, int_frame,
float_frame, float_string_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
assert_stat_op_calc('max', np.max, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('max', np.max, int_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
def test_mad(self, float_frame_with_na, float_frame, float_string_frame):
f = lambda x: np.abs(x - x.mean()).mean()
assert_stat_op_calc('mad', f, float_frame_with_na)
assert_stat_op_api('mad', float_frame, float_string_frame)
def test_var_std(self, float_frame_with_na, datetime_frame, float_frame,
float_string_frame):
alt = lambda x: np.var(x, ddof=1)
assert_stat_op_calc('var', alt, float_frame_with_na)
assert_stat_op_api('var', float_frame, float_string_frame)
alt = lambda x: np.std(x, ddof=1)
assert_stat_op_calc('std', alt, float_frame_with_na)
assert_stat_op_api('std', float_frame, float_string_frame)
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
pytest.raises(TypeError, lambda: getattr(df1, meth)(
axis=1, numeric_only=False))
pytest.raises(TypeError, lambda: getattr(df2, meth)(
axis=1, numeric_only=False))
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH 16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = nan
datetime_frame.loc[10:15, 1] = nan
datetime_frame.loc[15:, 2] = nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = nan
datetime_frame.loc[10:15, 1] = nan
datetime_frame.loc[15:, 2] = nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_sem(self, float_frame_with_na, datetime_frame,
float_frame, float_string_frame):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
assert_stat_op_calc('sem', alt, float_frame_with_na)
assert_stat_op_api('sem', float_frame, float_string_frame)
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_skew(self, float_frame_with_na, float_frame, float_string_frame):
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
assert_stat_op_calc('skew', alt, float_frame_with_na)
assert_stat_op_api('skew', float_frame, float_string_frame)
@td.skip_if_no_scipy
def test_kurt(self, float_frame_with_na, float_frame, float_string_frame):
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('kurt', alt, float_frame_with_na)
assert_stat_op_api('kurt', float_frame, float_string_frame)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
from datetime import timedelta
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.core.tools.timedeltas import (
_coerce_scalar_to_timedelta_type as _coerce)
result = mixed.min()
expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
_coerce(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self, empty_frame):
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series([unit, unit], index=['A', 'B'], dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series([unit, np.nan], index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median_corner(self, int_frame, float_frame, float_string_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
assert_stat_op_api('median', float_frame, float_string_frame)
# Miscellanea
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with tm.assert_raises_regex(ValueError, xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_numpy_round(self):
# GH 12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
tm.assert_frame_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(df, decimals=0, out=df)
def test_round_mixed_type(self):
# GH 11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
tm.assert_frame_equal(df.round(), round_0)
tm.assert_frame_equal(df.round(1), df)
tm.assert_frame_equal(df.round({'col1': 1}), df)
tm.assert_frame_equal(df.round({'col1': 0}), round_0)
tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH 11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
tm.assert_index_equal(rounded.index, dfs.index)
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
pytest.raises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip("build in round cannot be overridden "
"prior to Python 3")
# GH 11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(round(df), expected_rounded)
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# Clip
def test_clip(self, float_frame):
median = float_frame.median().median()
original = float_frame.copy()
capped = float_frame.clip_upper(median)
assert not (capped.values > median).any()
floored = float_frame.clip_lower(median)
assert not (floored.values < median).any()
double = float_frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
# Verify that float_frame was not changed inplace
assert (float_frame.values == original.values).all()
def test_inplace_clip(self, float_frame):
# GH 15388
median = float_frame.median().median()
frame_copy = float_frame.copy()
frame_copy.clip_upper(median, inplace=True)
assert not (frame_copy.values > median).any()
frame_copy = float_frame.copy()
frame_copy.clip_lower(median, inplace=True)
assert not (frame_copy.values < median).any()
frame_copy = float_frame.copy()
frame_copy.clip(upper=median, lower=median, inplace=True)
assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
# GH 2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
def test_clip_mixed_numeric(self):
# TODO(jreback)
# clip on mixed integer or floats
# with integer clippers coerces to float
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
expected = DataFrame({'A': [1, 2, 2.],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
# GH 6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
original = df.copy()
clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
if inplace:
clipped_df = df
for i in range(2):
lb_mask = original.iloc[:, i] <= lb
ub_mask = original.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
(0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
(1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, simple_frame,
inplace, lower, axis, res):
# GH 15390
original = simple_frame.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
expected = pd.DataFrame(res,
columns=original.columns,
index=original.index)
if inplace:
result = original
tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=axis)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
def test_clip_with_na_args(self, float_frame):
"""Should process np.nan argument as None """
# GH 17276
tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan),
float_frame)
# GH 19992
df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
'col_2': [7, 8, 9]})
result = df.clip(lower=[4, 5, np.nan], axis=0)
expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
'col_2': [7, 8, np.nan]})
tm.assert_frame_equal(result, expected)
result = df.clip(lower=[4, 5, np.nan], axis=1)
expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
'col_2': [np.nan, np.nan, np.nan]})
tm.assert_frame_equal(result, expected)
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b1)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
expected = a.dot(a.iloc[0])
tm.assert_series_equal(result, expected)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
@pytest.mark.xfail(
_np_version_under1p12,
reason="unpredictable return types under numpy < 1.12")
def test_matmul(self):
# matmul test is for GH 10259
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
# DataFrame @ DataFrame
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(a, b.one)
expected = Series(np.dot(a.values, b.one.values),
index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
# np.array @ DataFrame
result = operator.matmul(a.values, b)
expected = np.dot(a.values, b.values)
tm.assert_almost_equal(result, expected)
# nested list @ DataFrame (__rmatmul__)
result = operator.matmul(a.values.tolist(), b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_almost_equal(result.values, expected.values)
# mixed dtype DataFrame @ DataFrame
a['q'] = a.q.round().astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# different dtypes DataFrame @ DataFrame
a = a.astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
return pd.DataFrame({'a': [1, 2, 3, 4, 4],
'b': [1, 1, 1, 1, 1],
'c': [0, 1, 2, 5, 4]},
index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
return pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
return pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
class TestNLargestNSmallest(object):
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
# ----------------------------------------------------------------------
# Top / bottom
@pytest.mark.parametrize('order', [
['a'],
['c'],
['a', 'b'],
['a', 'c'],
['b', 'a'],
['b', 'c'],
['a', 'b', 'c'],
['c', 'a', 'b'],
['c', 'b', 'a'],
['b', 'c', 'a'],
['b', 'a', 'c'],
# dups!
['b', 'c', 'c']])
@pytest.mark.parametrize('n', range(1, 11))
def test_n(self, df_strings, nselect_method, n, order):
# GH 10393
df = df_strings
if 'b' in order:
error_msg = self.dtype_error_msg_template.format(
column='b', method=nselect_method, dtype='object')
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, nselect_method)(n, order)
else:
ascending = nselect_method == 'nsmallest'
result = getattr(df, nselect_method)(n, order)
expected = df.sort_values(order, ascending=ascending).head(n)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('columns', [
('group', 'category_string'), ('group', 'string')])
def test_n_error(self, df_main_dtypes, nselect_method, columns):
df = df_main_dtypes
col = columns[1]
error_msg = self.dtype_error_msg_template.format(
column=col, method=nselect_method, dtype=df[col].dtype)
# escape some characters that may be in the repr
error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
.replace("[", "\\[").replace("]", "\\]"))
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, nselect_method)(2, columns)
def test_n_all_dtypes(self, df_main_dtypes):
df = df_main_dtypes
df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
df.nlargest(2, list(set(df) - {'category_string', 'string'}))
@pytest.mark.parametrize('method,expected', [
('nlargest',
pd.DataFrame({'a': [2, 2, 2, 1], 'b': [3, 2, 1, 3]},
index=[2, 1, 0, 3])),
('nsmallest',
pd.DataFrame({'a': [1, 1, 1, 2], 'b': [1, 2, 3, 1]},
index=[5, 4, 3, 0]))])
def test_duplicates_on_starter_columns(self, method, expected):
# regression test for #22752
df = pd.DataFrame({
'a': [2, 2, 2, 1, 1, 1],
'b': [1, 2, 3, 3, 2, 1]
})
result = getattr(df, method)(4, columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_n_identical_values(self):
# GH 15297
df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
result = df.nlargest(3, 'a')
expected = pd.DataFrame(
{'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
)
tm.assert_frame_equal(result, expected)
result = df.nsmallest(3, 'a')
expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('order', [
['a', 'b', 'c'],
['c', 'b', 'a'],
['a'],
['b'],
['a', 'b'],
['c', 'b']])
@pytest.mark.parametrize('n', range(1, 6))
def test_n_duplicate_index(self, df_duplicates, n, order):
# GH 13412
df = df_duplicates
result = df.nsmallest(n, order)
expected = df.sort_values(order).head(n)
tm.assert_frame_equal(result, expected)
result = df.nlargest(n, order)
expected = df.sort_values(order, ascending=False).head(n)
tm.assert_frame_equal(result, expected)
def test_duplicate_keep_all_ties(self):
# GH 16818
df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
'b': [10, 9, 8, 7, 5, 50, 10, 20]})
result = df.nlargest(4, 'a', keep='all')
expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
5: 3, 6: 3, 7: 3},
'b': {0: 10, 1: 9, 2: 8, 4: 5,
5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
result = df.nsmallest(2, 'a', keep='all')
expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip_lower(s, axis=0)
for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
getattr(df, op)(s_nan, axis=0)
def test_series_nat_conversion(self):
# GH 18521
# Check rank does not mutate DataFrame
df = DataFrame(np.random.randn(10, 3), dtype='float64')
expected = df.copy()
df.rank()
result = df
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
andreadelprete/sot-torque-control | python/compress_identification_data_old_until_12_2015.py | 2 | 8479 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 23 09:02:21 2015
@author: adelpret
"""
import numpy as np
import matplotlib.pyplot as plt
from plot_utils import *
from compute_estimates_from_sensors import compute_estimates_from_sensors
FOLDER_ID = 10;
EST_DELAY = 0.02; ''' delay introduced by the estimation in seconds '''
NJ = 30; ''' number of joints '''
DT = 0.001; ''' sampling period '''
PLOT_DATA = True;
FORCE_ESTIMATE_RECOMPUTATION = True;
NEGLECT_GYROSCOPE = True;
NEGLECT_ACCELEROMETER = True;
SET_NORMAL_FORCE_RIGHT_FOOT_TO_ZERO = False;
JOINT_ID = np.array(range(12)); ''' IDs of the joints to save '''
if(FOLDER_ID==1):
data_folder = '../results/20150422_182432_rhp_torque_id/';
elif(FOLDER_ID==2):
data_folder = '../results/20150423_102257_rhp_torque_id/';
elif(FOLDER_ID==3):
data_folder = '../results/20150423_173259_rhp_torque_id_contact/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==4):
data_folder = '../results/20150424_100759_rhp_torque_id_2ways/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==5):
data_folder = '../results/20150424_112301_rhp_torque_id_multi_conf/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==6):
data_folder = '../results/20150424_135254_rhp_torque_id_multi_norm_force/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==7):
data_folder = '../results/20150430_095211_rhp_id_zero_normal_force/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==8):
data_folder = '../results/20150430_103057_rhp_id_100_normal_force/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==9):
data_folder = '../results/20150505_113323_rhp_id_clamp/';
JOINT_ID = np.array([2,3,4]);
elif(FOLDER_ID==10):
data_folder = '../results/20160203_172245_id_rhp_pwm/';
JOINT_ID = np.array([2]);
FILE_READ_SUCCEEDED = False;
DATA_FILE_NAME = 'data';
TEXT_DATA_FILE_NAME = 'data.txt';
N_DELAY = int(EST_DELAY/DT);
file_name_qDes = 'dg_HRP2LAAS-control.dat';
file_name_enc = 'dg_HRP2LAAS-robotState.dat';
file_name_acc = 'dg_HRP2LAAS-accelerometer.dat';
file_name_gyro = 'dg_HRP2LAAS-gyrometer.dat';
file_name_forceLA = 'dg_HRP2LAAS-forceLARM.dat';
file_name_forceRA = 'dg_HRP2LAAS-forceRARM.dat';
file_name_forceLL = 'dg_HRP2LAAS-forceLLEG.dat';
file_name_forceRL = 'dg_HRP2LAAS-forceRLEG.dat';
file_name_ptorque = 'dg_HRP2LAAS-ptorque.dat';
file_name_current = 'dg_HRP2LAAS-currents.dat';
file_name_p_gain = 'dg_HRP2LAAS-p_gains.dat';
''' Load data from file '''
try:
data = np.load(data_folder+DATA_FILE_NAME+'.npz');
time = data['time'];
enc = data['enc'];
acc = data['acc'];
gyro = data['gyro'];
forceLA = data['forceLA'];
forceRA = data['forceRA'];
forceLL = data['forceLL'];
forceRL = data['forceRL'];
N = acc.shape[0];
qDes = np.empty((N,len(JOINT_ID)));
dq = np.empty((N,len(JOINT_ID)));
ddq = np.empty((N,len(JOINT_ID)));
tau = np.empty((N,len(JOINT_ID)));
ptorques = np.empty((N,len(JOINT_ID)));
p_gains = np.empty((N,len(JOINT_ID)));
currents = np.empty((N,len(JOINT_ID)));
FILE_READ_SUCCEEDED = True;
for i in range(len(JOINT_ID)):
data = np.load(data_folder+DATA_FILE_NAME+'_j'+str(JOINT_ID[i])+'.npz');
qDes[:,i] = data['qDes'];
ptorques[:,i] = data['ptorque'];
p_gains[:,i] = data['p_gain'];
currents[:,i] = data['current'];
if(FORCE_ESTIMATE_RECOMPUTATION==False):
dq[:,i] = data['dq'];
ddq[:,i] = data['ddq'];
tau[:,i] = data['tau'];
except (IOError, KeyError):
print 'Gonna read text files...'
qDes = np.loadtxt(data_folder+file_name_qDes);
enc = np.loadtxt(data_folder+file_name_enc);
acc = np.loadtxt(data_folder+file_name_acc);
gyro = np.loadtxt(data_folder+file_name_gyro);
forceLA = np.loadtxt(data_folder+file_name_forceLA);
forceRA = np.loadtxt(data_folder+file_name_forceRA);
forceLL = np.loadtxt(data_folder+file_name_forceLL);
forceRL = np.loadtxt(data_folder+file_name_forceRL);
ptorques = np.loadtxt(data_folder+file_name_ptorque);
currents = np.loadtxt(data_folder+file_name_current);
p_gains = np.loadtxt(data_folder+file_name_p_gain);
# check that the largest signal has the same length as the smallest signal
n_enc = len(enc[:,0]);
n_acc = len(acc[:,0]);
if(n_acc!=n_enc):
print "Reducing size of signals from %d to %d" % (n_acc, n_enc);
N = np.min([n_enc,n_acc]);
time = enc[:N,0];
qDes = qDes[:N,1:];
qDes = qDes[:,JOINT_ID].reshape(N,len(JOINT_ID));
enc = enc[:N,7:];
acc = acc[:N,1:];
gyro = gyro[:N,1:];
forceLA = forceLA[:N,1:];
forceRA = forceRA[:N,1:];
forceLL = forceLL[:N,1:];
forceRL = forceRL[:N,1:];
ptorques = ptorques[:N,1:];
currents = currents[:N,1:];
p_gains = p_gains[:N,1:];
# save sensor data
np.savez(data_folder+DATA_FILE_NAME+'.npz',
time=time,
enc=enc.reshape(N,NJ),
acc=acc.reshape(N,3),
gyro=gyro.reshape(N,3),
forceLA=forceLA.reshape(N,6),
forceRA=forceRA.reshape(N,6),
forceLL=forceLL.reshape(N,6),
forceRL=forceRL.reshape(N,6));
N = len(enc[:,0]);
if(FORCE_ESTIMATE_RECOMPUTATION or FILE_READ_SUCCEEDED==False):
print 'Gonna estimate dq, ddq, tau';
dt='f4';
a = np.zeros(N, dtype=[ ('enc',dt,NJ)
,('forceLA',dt,6)
,('forceRA',dt,6)
,('forceLL',dt,6)
,('forceRL',dt,6)
,('acc',dt,3)
,('gyro',dt,3)
,('time',dt,1)]);
a['enc'] = enc;
a['forceLA'] = forceLA;
a['forceRA'] = forceRA;
a['forceLL'] = forceLL;
a['forceRL'] = forceRL;
if(SET_NORMAL_FORCE_RIGHT_FOOT_TO_ZERO):
a['forceRL'][:,2] = 0.0;
if(NEGLECT_ACCELEROMETER):
a['acc'] = np.mean(acc,0);
else:
a['acc'] = acc;
if(NEGLECT_GYROSCOPE==False):
a['gyro'] = gyro;
a['time'] = np.squeeze(time*DT);
(tau, dq, ddq) = compute_estimates_from_sensors(a, EST_DELAY);
# shift estimate backward in time to compensate for estimation delay
dq[:-N_DELAY,:] = dq[N_DELAY::,:];
ddq[:-N_DELAY,:] = ddq[N_DELAY::,:];
tau[:-N_DELAY,:] = tau[N_DELAY::,:];
# set last N_DELAY sample to constant value
dq[-N_DELAY:,:] = dq[-N_DELAY,:];
ddq[-N_DELAY:,:] = ddq[-N_DELAY,:];
tau[-N_DELAY:,:] = tau[-N_DELAY,:];
# eliminate data of joints not to save
dq = dq[:,JOINT_ID].reshape(N,len(JOINT_ID));
ddq = ddq[:,JOINT_ID].reshape(N,len(JOINT_ID));
tau = tau[:,JOINT_ID].reshape(N,len(JOINT_ID));
for i in range(len(JOINT_ID)):
np.savez(data_folder+DATA_FILE_NAME+'_j'+str(JOINT_ID[i])+'.npz', qDes=qDes[:,i],
enc=enc[:,JOINT_ID[i]], tau=tau[:,i], dq=dq[:,i], ddq=ddq[:,i], current=currents[:,i],
ptorque=ptorques[:,i], p_gain=p_gains[:,i]);
if(PLOT_DATA):
''' Plot data '''
plt.figure(); plt.plot(acc); plt.title('Acc');
plt.figure(); plt.plot(gyro); plt.title('Gyro');
plt.figure(); plt.plot(forceLA); plt.title('Force Left Arm');
plt.figure(); plt.plot(forceRA); plt.title('Force Right Arm');
plt.figure(); plt.plot(forceLL); plt.title('Force Left Leg');
plt.figure(); plt.plot(forceRL); plt.title('Force Right Leg');
for i in range(len(JOINT_ID)):
# plt.figure(); plt.plot(enc[:,JOINT_ID[i]]-qDes[:,i]); plt.title('Delta_q '+str(JOINT_ID[i]));
# plt.figure(); plt.plot(dq[:,i]); plt.title('Joint velocity '+str(JOINT_ID[i]));
plt.figure(); plt.plot(tau[:,i]); plt.title('Joint torque '+str(JOINT_ID[i]));
plt.figure(); plt.plot(tau[:,i], qDes[:,i]-enc[:,JOINT_ID[i]]); plt.title('Torque VS delta_q '+str(JOINT_ID[i]));
# plt.figure(); plt.plot(tau[:,i], currents[:,i]); plt.title('Torque VS current '+str(JOINT_ID[i]));
plt.figure(); plt.plot(tau[:,i], ptorques[:,i]); plt.title('Torque VS pseudo-torque '+str(JOINT_ID[i]));
plt.figure(); plt.plot(ptorques[:,i], qDes[:,i]-enc[:,JOINT_ID[i]]); plt.title('Pseudo-torque VS delta_q '+str(JOINT_ID[i]));
# plt.figure(); plt.plot(p_gains[:,i]); plt.title('Proportional gain '+str(JOINT_ID[i]));
plt.show();
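# --- Editor's illustration (hedged; not part of the original script) ---------
# A minimal, self-contained sketch of the delay compensation applied above:
# estimated signals are shifted backward in time by N_DELAY samples and the
# trailing N_DELAY samples are held at the last valid value. The helper name
# below is hypothetical and is never called by this script.
def _shift_back_and_hold(signal, n_delay):
    # signal: (N, k) array of delayed estimates; n_delay: delay in samples
    out = signal.copy();
    out[:-n_delay, :] = signal[n_delay:, :];
    out[-n_delay:, :] = signal[-n_delay, :];
    return out;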
| lgpl-3.0 |
cbertinato/pandas | pandas/tests/arrays/categorical/test_analytics.py | 1 | 11955 | import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, Series
from pandas.api.types import is_scalar
import pandas.util.testing as tm
class TestCategoricalAnalytics:
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = "Categorical is not ordered for operation {}"
with pytest.raises(TypeError, match=msg.format('min')):
cat.min()
with pytest.raises(TypeError, match=msg.format('max')):
cat.max()
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
_min = cat.min(numeric_only=True)
assert _min == "c"
_max = cat.max(numeric_only=True)
assert _max == "b"
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
_min = cat.min(numeric_only=True)
assert _min == 2
_max = cat.max(numeric_only=True)
assert _max == 1
@pytest.mark.parametrize("values,categories,exp_mode", [
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4])])
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
c1 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=True)
s1 = Series(c1)
c2 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=False)
s2 = Series(c2)
# Searching for single item argument, side='left' (default)
res_cat = c1.searchsorted('apple')
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = s1.searchsorted('apple')
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = c1.searchsorted(['bread'])
res_ser = s1.searchsorted(['bread'])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = c1.searchsorted(['apple', 'bread'], side='right')
res_ser = s1.searchsorted(['apple', 'bread'], side='right')
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
msg = r"Value\(s\) to be inserted must be in categories"
with pytest.raises(KeyError, match=msg):
c1.searchsorted('cucumber')
with pytest.raises(KeyError, match=msg):
s1.searchsorted('cucumber')
# Searching for multiple values, one of which is not from the Categorical
with pytest.raises(KeyError, match=msg):
c1.searchsorted(['bread', 'cucumber'])
with pytest.raises(KeyError, match=msg):
s1.searchsorted(['bread', 'cucumber'])
# searchsorted call for unordered Categorical
msg = "Categorical not ordered"
with pytest.raises(ValueError, match=msg):
c2.searchsorted('apple')
with pytest.raises(ValueError, match=msg):
s2.searchsorted('apple')
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, 'a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(['foo', 'foo', 'bar'])
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list('ABABC'), categories=list('CBA'), ordered=True)
result = c.map(lambda x: x.lower())
exp = Categorical(list('ababc'), categories=list('cba'), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list('ABABC'), categories=list('ABC'), ordered=False)
result = c.map(lambda x: x.lower())
exp = Categorical(list('ababc'), categories=list('abc'), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
def test_validate_inplace(self):
cat = Categorical(['A', 'B', 'B', 'C', 'A'])
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError):
cat.set_categories(['X', 'Y', 'Z'], rename=True, inplace=value)
with pytest.raises(ValueError):
cat.rename_categories(['X', 'Y', 'Z'], inplace=value)
with pytest.raises(ValueError):
cat.reorder_categories(
['X', 'Y', 'Z'], ordered=True, inplace=value)
with pytest.raises(ValueError):
cat.add_categories(
new_categories=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_categories(removals=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError):
cat.sort_values(inplace=value)
def test_isna(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isna()
tm.assert_numpy_array_equal(res, exp)
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/mixture/gmm.py | 6 | 31222 | """
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
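# A hedged usage sketch (editor's illustration, not part of the scikit-learn
# API): evaluate the per-sample, per-component log-densities of a toy data set
# under diagonal covariances. The function name below is hypothetical and is
# never called by the library.
def _log_density_usage_example():
    X = np.array([[0.0, 0.0], [1.0, 1.0]])        # 2 samples, 2 features
    means = np.array([[0.0, 0.0], [5.0, 5.0]])    # 2 mixture components
    covars = np.array([[1.0, 1.0], [2.0, 2.0]])   # diagonal covariances
    # returns an array of shape (n_samples, n_components) = (2, 2)
    return log_multivariate_normal_density(X, means, covars,
                                            covariance_type='diag')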
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
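# A hedged usage sketch (editor's illustration, not called by the library):
# draw a few samples from a 2-D Gaussian with a full covariance matrix. The
# function name below is hypothetical.
def _sample_gaussian_usage_example():
    mean = np.array([0.0, 10.0])
    covar = np.array([[1.0, 0.3], [0.3, 2.0]])
    # returned array has shape (n_features, n_samples) = (2, 5)
    return sample_gaussian(mean, covar, covariance_type='full',
                           n_samples=5, random_state=0)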
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
        X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
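        # Inverse-CDF draw: each uniform value in `rand` is mapped to the
        # component whose cumulative-weight interval contains it, so
        # component c is selected with probability weights_[c].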
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
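    # A minimal usage sketch for sample(), assuming a fitted estimator `g`
    # and a 2-D observation array `obs` (both names are hypothetical):
    #
    #     g = GMM(n_components=2).fit(obs)
    #     new_points = g.sample(n_samples=5)   # array of shape (5, n_features)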
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        the prediction may not be 100% accurate when the number of
        iterations is low.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
        # check that at least one initialization produced a valid (finite)
        # log-likelihood
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
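    # A warm-start sketch based on the note above (variable names are
    # hypothetical): constructing the estimator with init_params='' skips
    # re-initialization of parameters that are already set, and n_iter=0
    # performs the initialization only.
    #
    #     g = GMM(n_components=2, init_params='', n_iter=100)
    #     g.weights_, g.means_, g.covars_ = prev_weights, prev_means, prev_covars
    #     g.fit(X_new)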
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
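# Illustrative sketch only (not part of the original module; the helper name
# and the candidate grid are hypothetical). A common use of bic() or aic()
# above is to refit over a small grid of component counts and keep the
# best-scoring model.
def _example_select_n_components_by_bic(X, candidates=(1, 2, 3, 4, 5)):
    best_model, best_bic = None, np.inf
    for k in candidates:
        model = GMM(n_components=k, covariance_type='diag').fit(X)
        bic = model.bic(X)
        if bic < best_bic:
            best_model, best_bic = model, bic
    return best_model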
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
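    # Vectorized form of
    #   log N(x; mu, diag(s)) = -0.5 * (n_dim * log(2*pi) + sum_j log s_j
    #                                   + sum_j (x_j - mu_j)**2 / s_j),
    # with the quadratic term expanded as
    #   sum_j mu_j**2 / s_j - 2 * sum_j x_j * mu_j / s_j + sum_j x_j**2 / s_j,
    # which matches the three dot-product terms below.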
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
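# For a template of shape (n_features, n_features) and n_components components,
# the returned covariances have shape (n_components, n_features) for
# 'spherical' and 'diag', (n_features, n_features) for 'tied', and
# (n_components, n_features, n_features) for 'full'.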
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
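    # i.e. covars_[c] = sum_i r_ic (x_i - mu_c)(x_i - mu_c)^T / sum_i r_ic,
    # with a min_covar floor added to the diagonal of each matrix.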
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
            # Underflow errors in computing post * diff.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
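    """Perform the covariance M-step for the tied case"""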
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
zhen-he/TensNN | scripts/draw.py | 1 | 1285 | import json
import matplotlib.pyplot as plt
drawTrain = 1
split = 'val'  # which split's loss curve to plot; avoids shadowing the builtin `str`
if drawTrain == 1:
    split = 'train'
with open('t3_s128_multi_80.json', 'r') as f1:
data1 = json.load(f1)
hist1 = data1[split + '_loss_history']
time1 = data1['forward_backward_times']
memory1 = data1['memory_usage']
# with open('t33_s517_nodp_10000.json', 'r') as f2:
# data2 = json.load(f2)
# hist2 = data2[split + '_loss_history']
# time2 = data2['forward_backward_times']
# memory2 = data2['memory_usage']
#
# with open('t7_s700_nodp_14240.json', 'r') as f3:
# data3 = json.load(f3)
# hist3 = data3[str + '_loss_history']
# time3 = data3['forward_backward_times']
# memory3 = data3['memory_usage']
#
# with open('t44_s517_nodp_14240.json', 'r') as f4:
# data4 = json.load(f4)
# hist4 = data4[split + '_loss_history']
# time4 = data4['forward_backward_times']
# memory4 = data4['memory_usage']
plt.figure(1)
plt.plot(hist1, 'r-')
# plt.plot(hist1, 'r-', hist2, 'g-', hist3, 'b-', hist4, 'y-')
plt.ylabel('loss')
# plt.figure(2)
# # plt.plot(time1, 'r-', time2, 'g-')
# plt.plot(time1, 'r-', time2, 'g-', time3, 'b-')
# plt.ylabel('time')
#
# plt.figure(3)
# # plt.plot(memory1, 'r-', memory2, 'g-')
# plt.plot(memory1, 'r-', memory2, 'g-', memory3, 'b-')
# plt.ylabel('memory')
plt.show()
| mit |
yanlend/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |