repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
nss350/magPy | tests/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The bandwidth can be detected automatically using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
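# --- Editorial sketch (not part of the original scikit-learn example) -------
# A hedged illustration of how the `quantile` argument of estimate_bandwidth
# trades bandwidth against the number of detected clusters. The quantile
# values are illustrative assumptions, not tuned settings; the helper is
# defined but never called, so the demo's behaviour is unchanged.
def _sweep_bandwidth_quantiles(quantiles=(0.1, 0.2, 0.3)):
    for q in quantiles:
        bw = estimate_bandwidth(X, quantile=q, n_samples=500)
        labels_q = MeanShift(bandwidth=bw, bin_seeding=True).fit(X).labels_
        print("quantile=%.2f -> bandwidth=%.3f, clusters=%d"
              % (q, bw, len(np.unique(labels_q))))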
| apache-2.0 |
lindsayad/jupyter_notebooks | moose_calc_routines.py | 1 | 16941 | import sympy as sp
import re
import numpy as np
import matplotlib.pyplot as plt
from subprocess import call
import os
'''
Calculus methods
'''
def eye2():
return sp.Matrix([[sp.Integer(1), sp.Integer(0)], [sp.Integer(0), sp.Integer(1)]])
def zeroVec2():
return sp.Matrix([sp.Integer(0), sp.Integer(0)])
def gradVec2(u_vec, x, y):
return sp.Matrix([[sp.diff(u_vec[0], x), sp.diff(u_vec[1],x)], [sp.diff(u_vec[0], y), sp.diff(u_vec[1], y)]])
def divTen2(tensor, x, y):
return sp.Matrix([sp.diff(tensor[0,0], x) + sp.diff(tensor[1,0], y), sp.diff(tensor[0, 1], x) + sp.diff(tensor[1,1], y)])
def divVec2(u_vec, x, y):
return sp.diff(u_vec[0], x) + sp.diff(u_vec[1], y)
def gradScalar2(u, x, y):
return sp.Matrix([sp.diff(u, x), sp.diff(u,y)])
def strain_rate(u_vec, x, y):
return gradVec2(u_vec, x, y) + gradVec2(u_vec, x, y).transpose()
def strain_rate_squared_2(u_vec, x, y):
tensor = gradVec2(u_vec, x, y) + gradVec2(u_vec, x, y).transpose()
rv = 0
for i in range(2):
for j in range(2):
rv += tensor[i, j] * tensor[i, j]
return rv
def laplace2(u, x, y):
return sp.diff(sp.diff(u, x), x) + sp.diff(sp.diff(u, y), y)
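'''
Editorial sanity check (a hedged sketch, not part of the original module):
on simple polynomials the helpers above reduce to known results. The function
is defined but never called.
'''
def _example_calculus_helpers():
    x, y = sp.symbols('x y')
    assert laplace2(x**2 + y**2, x, y) == 4           # d2/dx2 + d2/dy2 of x^2 + y^2
    assert divVec2(sp.Matrix([x, y]), x, y) == 2      # divergence of the identity field
    assert gradScalar2(x*y, x, y) == sp.Matrix([y, x])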
'''
Kernel operators and corresponding surface integral terms
'''
def L_diffusion(u, x, y):
return -laplace2(u, x, y)
def bc_terms_diffusion(u, nvec, x, y):
return (-nvec.transpose() * gradScalar2(u, x, y))[0,0]
def L_momentum_traction(uvec, p, k, eps, x, y):
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose(), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
turbulent_visc_term = -(divTen2(rho * cmu * k**2 / eps * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose()), x, y)).transpose()
# print(visc_term.shape, conv_term.shape, pressure_term.shape, sep="\n")
source = conv_term + visc_term + pressure_term + turbulent_visc_term
return source
def bc_terms_momentum_traction(uvec, nvec, p, k, eps, x, y, symbolic=True, parts=True):
if symbolic:
cmu = sp.var('c_{\mu}')
else:
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * nvec.transpose() * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose())).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
turbulent_visc_term = -(nvec.transpose() * (rho * cmu * k**2 / eps * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose()))).transpose()
return visc_term + turbulent_visc_term + pressure_term
def L_momentum_traction_no_turbulence(uvec, p, x, y):
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose(), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
source = conv_term + visc_term + pressure_term
return source
def bc_terms_momentum_traction_no_turbulence(uvec, nvec, p, x, y, parts=True):
mu, rho = sp.var('mu rho')
# visc_term = (-mu * nvec.transpose() * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose())).transpose()
visc_term = (-mu * nvec.transpose() * strain_rate(uvec, x, y)).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return visc_term + pressure_term
def L_momentum_laplace(uvec, p, k, eps, x, y):
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
turbulent_visc_term = -(divTen2(rho * cmu * k**2 / eps * (gradVec2(uvec, x, y)), x, y)).transpose()
# print(visc_term.shape, conv_term.shape, pressure_term.shape, sep="\n")
source = conv_term + visc_term + pressure_term + turbulent_visc_term
return source
def L_pressure(uvec, x, y):
return -divVec2(uvec, x, y)
def L_kin(uvec, k, eps, x, y):
cmu = 0.09
sigk = 1.
sigeps = 1.3
c1eps = 1.44
c2eps = 1.92
    mu, rho = sp.var('mu rho')  # fluid symbols used below
    conv_term = rho * uvec.transpose() * gradScalar2(k, x, y)
diff_term = - divVec2((mu + rho * cmu * k**2 / eps / sigk) * gradScalar2(k, x, y), x, y)
creation_term = - rho * cmu * k**2 / eps / 2 * strain_rate_squared_2(uvec, x, y)
destruction_term = rho * eps
terms = [conv_term[0,0], diff_term, creation_term, destruction_term]
L = 0
for term in terms:
L += term
return L
def L_eps(uvec, k, eps, x, y):
cmu = 0.09
sigk = 1.
sigeps = 1.3
c1eps = 1.44
c2eps = 1.92
    mu, rho = sp.var('mu rho')  # fluid symbols used below
    conv_term = rho * uvec.transpose() * gradScalar2(eps, x, y)
diff_term = - divVec2((mu + rho * cmu * k**2 / eps / sigeps) * gradScalar2(eps, x, y), x, y)
creation_term = - rho * c1eps * cmu * k / 2 * strain_rate_squared_2(uvec, x, y)
destruction_term = rho * c2eps * eps**2 / k
terms = [conv_term[0,0], diff_term, creation_term, destruction_term]
L = 0
for term in terms:
L += term
return L
def L_coupled_gradient_source(v, x, y):
return (-gradScalar2(v, x, y).transpose() * gradScalar2(v, x, y))[0,0]
def bc_terms_eps(nvec, k, eps, x, y):
cmu = 0.09
sigeps = 1.3
mu, rho = sp.var('mu rho')
return - nvec.transpose() * (mu + rho * cmu * k**2 / eps / sigeps) * gradScalar2(eps, x, y)
'''
Boundary condition operators
'''
def wall_function_momentum_traction(uvec, nvec, p, k, eps, x, y, tau_type, symbolic=True, parts=True):
# import pdb; pdb.set_trace()
if symbolic:
cmu = sp.var('c_{\mu}')
yStarPlus = sp.var('y_{\mu}')
else:
cmu = 0.09
yStarPlus = 11.06
if tau_type == "vel":
uvec_norm = sp.sqrt(uvec.transpose() * uvec)[0, 0]
uTau = uvec_norm / yStarPlus
elif tau_type == "kin":
uTau = cmu**.25 * sp.sqrt(k)
else:
raise ValueError("Must either pass 'vel' or 'kin' for tau_type")
mu, rho = sp.var('mu rho')
normal_stress_term = (-nvec.transpose() * mu * strain_rate(uvec, x, y) * nvec * nvec.transpose()).transpose()
tangential_stress_term = uTau / yStarPlus * uvec
muT = rho * cmu * k * k / eps
turbulent_stress_term = (-nvec.transpose() * muT * strain_rate(uvec, x, y)).transpose()
# turbulent_stress_term = (-nvec.transpose() * strain_rate(uvec, x, y)).transpose()
# turbulent_stress_term = (-nvec.transpose() * sp.Matrix([[1, 1], [1, 1]])).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return normal_stress_term + tangential_stress_term + turbulent_stress_term + pressure_term
# return pressure_term + normal_stress_term + turbulent_stress_term
# return normal_stress_term + tangential_stress_term + pressure_term
def no_bc_bc(uvec, nvec, p, x, y, parts=True):
mu, rho = sp.var('mu rho')
# visc_term = (-mu * nvec.transpose() * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose())).transpose()
visc_term = (-mu * nvec.transpose() * strain_rate(uvec, x, y)).transpose()
# visc_term = (-mu * nvec.transpose() * sp.Matrix([[1, 1], [1, 1]])).transpose()
import pdb; pdb.set_trace()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return visc_term + pressure_term
def vacuum(u, nvec):
return u / sp.Integer(2)
def ins_epsilon_wall_function_bc(nvec, k, eps, x, y):
cmu = 0.09
sigEps = 1.3
kappa = 0.41
mu, rho = sp.var('mu rho')
muT = rho * cmu * k**2 / eps
return - (mu + muT / sigEps) * kappa * cmu**.25 * sp.sqrt(k) * eps * rho / muT
def coupled_gradient_bc(nvec, v, x, y):
return (-nvec.transpose() * gradScalar2(v, x, y))[0,0]
def coupled_value_bc(v, x, y):
return -v
'''
Writing utilities
'''
def prep_moose_input(sym_expr):
rep1 = re.sub(r'\*\*',r'^',str(sym_expr))
rep2 = re.sub(r'mu',r'${mu}',rep1)
rep3 = re.sub(r'rho',r'${rho}',rep2)
return rep3
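# Editorial illustration (not part of the original module) of what
# prep_moose_input produces for a sympy expression:
#
#     prep_moose_input(sp.sympify('mu*x**2'))  ->  '${mu}*x^2'
#     prep_moose_input(sp.sympify('mu*rho'))   ->  '${mu}*${rho}'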
def write_all_functions(uVecNew, p, kinNew, epsilonNew, x, y):
target = open('/home/lindsayad/python/mms_input.txt','w')
target.write("[Functions]" + "\n")
target.write(" [./u_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_momentum_traction(uVecNew, p, kinNew, epsilonNew, x, y)[0]) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./v_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_momentum_traction(uVecNew, p, kinNew, epsilonNew, x, y)[1]) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./p_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_pressure(uVecNew, x, y)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./kin_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_kin(uVecNew, kinNew, epsilonNew, x, y)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./epsilon_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_eps(uVecNew, kinNew, epsilonNew, x, y)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./u_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
    target.write(" value = '" + str(uVecNew[0]) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./v_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
    target.write(" value = '" + str(uVecNew[1]) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./p_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
    target.write(" value = '" + str(p) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./kin_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + str(kinNew) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./epsilon_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + str(epsilonNew) + "'" + "\n")
target.write(" [../]" + "\n")
target.write("[]" + "\n")
target.close()
def write_reduced_functions(uVecNew, kinNew, epsilonNew, x, y):
target = open('/home/lindsayad/python/mms_input.txt','w')
target.write("[Functions]" + "\n")
target.write(" [./u_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_momentum_traction(uVecNew, sp.Integer(0), kinNew, epsilonNew, x, y)[0]) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./kin_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_kin(uVecNew, kinNew, epsilonNew, x, y)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./epsilon_source_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + prep_moose_input(L_eps(uVecNew, kinNew, epsilonNew, x, y)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./u_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + str(prep_moose_input(uVecNew[0])) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./kin_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + str(prep_moose_input(kinNew)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write(" [./epsilon_func]" + "\n")
target.write(" type = ParsedFunction" + "\n")
target.write(" value = '" + str(prep_moose_input(epsilonNew)) + "'" + "\n")
target.write(" [../]" + "\n")
target.write("[]" + "\n")
target.close()
from random import randint, random, uniform
def sym_func(x, y, L):
return round(uniform(.1, .99),1) + round(uniform(.1, .99),1) * sp.sin(round(uniform(.1, .99),1) * sp.pi * x / L) \
+ round(uniform(.1, .99),1) * sp.sin(round(uniform(.1, .99),1) * sp.pi * y / L) \
+ round(uniform(.1, .99),1) * sp.sin(round(uniform(.1, .99),1) * sp.pi * x * y / L)
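# Editorial sketch (hedged, not in the original module): build a random
# manufactured solution; L = 1 is an assumed domain length. Defined but not
# called anywhere.
def _example_sym_func():
    x, y = sp.symbols('x y')
    return sym_func(x, y, 1)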
'''
Context manager for changing the current working directory
Courtesy of http://stackoverflow.com/a/13197763/4493669
'''
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
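'''
Editorial usage sketch for the cd context manager above (hedged; "/tmp" is
only an illustrative path, not one used by the MMS scripts). Defined but not
called.
'''
def _example_cd():
    with cd('/tmp'):
        print(os.getcwd())   # temporarily inside /tmp
    print(os.getcwd())       # back in the original working directory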
'''
Function for running MMS simulation cases; currently programmed for addressing BC testing
'''
def mms_bc_cases(h_list, neumann_source_dict, volume_source_dict, solution_dict,
bounds_dict, base, exe_path, input_dir, test_var="u", natural=False):
with cd(input_dir):
for h in h_list:
for bnd, anti_bnds in bounds_dict.items():
args = [exe_path, "-i", base + ".i", "Mesh/nx=" + h, #"Mesh/ny=" + h,
# "mu=1.5", "rho=2.5",
"BCs/" + str(test_var) + "/boundary=" + anti_bnds,
"BCs/" + str(test_var) + "_fn_neumann/boundary=" + bnd,
"Outputs/csv/file_base=" + h + "_" + bnd + "_" + base,
"Outputs/exodus/file_base=" + h + "_" + bnd + "_" + base,
"Functions/" + str(test_var) + "_bc_func/value=" + neumann_source_dict[bnd]]
if not natural:
args.append("BCs/" + str(test_var) + "_test/boundary=" + bnd)
# import pdb; pdb.set_trace()
for var, func in volume_source_dict.items():
string = "Functions/" + var + "_source_func/value=" + str(func)
args.append(string)
for var, func in solution_dict.items():
args.append("Functions/" + var + "_func/value=" + str(func))
call(args)
'''
Function for running MMS simulation cases; currently programmed for kernels only
'''
def mms_kernel_cases(h_list, volume_source_dict, solution_dict, base, exe_path, input_dir):
with cd(input_dir):
for h in h_list:
args = [exe_path, "-i", base + ".i", "Mesh/nx=" + h, "Mesh/ny=" + h,
# "mu=1.5", "rho=2.5",
"Outputs/csv/file_base=" + h + "_" + base,
"Outputs/exodus/file_base=" + h + "_" + base]
for var, func in volume_source_dict.items():
string = "Functions/" + var + "_source_func/value=" + str(func)
args.append(string)
for var, func in solution_dict.items():
args.append("Functions/" + var + "_func/value=" + str(func))
call(args)
'''
Function for preparing order of accuracy plots
'''
def plot_order_accuracy(h_array, base, input_dir, optional_save_string='', boundary=''):
with cd(input_dir):
if boundary:
boundary = "_" + str(boundary)
if optional_save_string:
optional_save_string = "_" + str(optional_save_string)
variable_names = {}
data_file = str(int(1/h_array[0])) + boundary + "_" + str(base) + ".csv"
init_data = np.genfromtxt(data_file, dtype=float, names=True, delimiter=',')
for name in init_data.dtype.names:
if name != 'time':
variable_names[name] = np.array([])
variable_names[name] = np.append(variable_names[name], init_data[name][-1])
for h in h_array[1:]:
data_file = str(int(1/h)) + boundary + "_" + str(base) + ".csv"
data = np.genfromtxt(data_file, dtype=float, names=True, delimiter=',')
for name in variable_names:
variable_names[name] = np.append(variable_names[name], data[name][-1])
for name, data_array in variable_names.items():
z = np.polyfit(np.log(h_array), np.log(data_array), 1)
p = np.poly1d(z)
plt.plot(np.log(h_array), p(np.log(h_array)), '-')
equation = "y=%.1fx+%.1f" % (z[0],z[1])
plt.scatter(np.log(h_array), np.log(data_array), label=name + "; " + equation)
plt.legend()
save_string = "/home/lindsayad/Pictures/" + str(base) + boundary + optional_save_string
plt.savefig(save_string + ".eps", format='eps')
plt.savefig(save_string + ".png", format='png')
plt.close()
| mit |
HERA-Team/Monitor_and_Control | hera_mc/geo_location.py | 2 | 6951 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Keep track of geo-located stations."""
from astropy.time import Time
from sqlalchemy import Column, Float, String, BigInteger, ForeignKey, func
from . import MCDeclarativeBase, NotNull
from . import mc, cm_utils
class StationType(MCDeclarativeBase):
"""
Table to track/denote station type data categories in various ways.
Attributes
----------
station_type_name : String Column
Name of type class, Primary_key
prefix : String Column
String prefix to station type, elements of which are typically
characterized by <prefix><int>. Comma-delimit list if more than one.
Note that prefix is not in the primary_key, so there can be multiple
prefixes per type_name.
description : String Column
        Short description of station type.
plot_marker : String Column
matplotlib marker type to use
"""
__tablename__ = 'station_type'
station_type_name = Column(String(64), primary_key=True)
prefix = NotNull(String(64))
description = Column(String(64))
plot_marker = Column(String(64))
def __repr__(self):
"""Define representation."""
return ('<subarray {self.station_type_name}: prefix={self.prefix} '
'description={self.description} marker={self.plot_marker}>'
.format(self=self))
class GeoLocation(MCDeclarativeBase):
"""
A table logging stations within HERA.
Attributes
----------
station_name : String Column
Colloquial name of station (which is a unique location on the ground).
This one shouldn't change. Primary_key
station_type_name : String Column
Name of station type of which it is a member.
Should match prefix per station_type table.
datum : String Column
Datum of the geoid.
tile : String Column
UTM tile
northing : Float Column
Northing coordinate in m
easting : Float Column
Easting coordinate in m
elevation : Float Column
Elevation in m
created_gpstime : BigInteger Column
        The date when the station was assigned by the project.
"""
__tablename__ = 'geo_location'
station_name = Column(String(64), primary_key=True)
station_type_name = Column(String(64), ForeignKey(StationType.station_type_name),
nullable=False)
datum = Column(String(64))
tile = Column(String(64))
northing = Column(Float(precision='53'))
easting = Column(Float(precision='53'))
elevation = Column(Float)
created_gpstime = NotNull(BigInteger)
def gps2Time(self):
"""Add a created_date attribute -- an astropy Time object based on created_gpstime."""
self.created_date = Time(self.created_gpstime, format='gps')
def geo(self, **kwargs):
"""Add arbitrary attributes to object based on dict."""
for key, value in kwargs.items():
if key == 'station_name':
value = value.upper()
setattr(self, key, value)
def __repr__(self):
"""Define representation."""
return '<station_name={self.station_name} station_type={self.station_type_name} \
northing={self.northing} easting={self.easting} \
elevation={self.elevation}>'.format(self=self)
def update(session=None, data=None, add_new_geo=False):
"""
Update the geo_location table with some data.
Use with caution -- should usually use in a script which will do datetime
primary key etc.
Parameters
----------
session : session
session on current database. If session is None, a new session
on the default database is created and used.
data : str or list
[[station_name0,column0,value0],[...]]
where
station_nameN: station_name (starts with char)
values: corresponding list of values
add_new_geo : bool
allow a new entry to be made.
Returns
-------
bool
Flag if successful
"""
data_dict = format_check_update_request(data)
if data_dict is None:
print('No update - doing nothing.')
return False
close_session_when_done = False
if session is None: # pragma: no cover
db = mc.connect_mc_db()
session = db.sessionmaker()
close_session_when_done = True
for station_name in data_dict.keys():
geo_rec = session.query(GeoLocation).filter(
func.upper(GeoLocation.station_name) == station_name.upper())
num_rec = geo_rec.count()
make_update = False
if num_rec == 0:
if add_new_geo:
gr = GeoLocation()
make_update = True
else:
raise ValueError("{} does not exist and add_new_geo not enabled."
.format(station_name))
elif num_rec == 1:
if add_new_geo:
                raise ValueError("{} exists and add_new_geo is enabled."
.format(station_name))
else:
gr = geo_rec.first()
make_update = True
if make_update:
for d in data_dict[station_name]:
setattr(gr, d[1], d[2])
session.add(gr)
session.commit()
cm_utils.log('geo_location update', data_dict=data_dict)
if close_session_when_done: # pragma: no cover
session.close()
return True
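# Editorial usage sketch (hypothetical station name, column and value; a live
# M&C database session is required unless one is passed in):
#
#     update(session=my_session, data="HH0:elevation:1050.0",
#            add_new_geo=False)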
def format_check_update_request(request):
"""
Parse the update request for use in the update function.
Parameters
----------
request : str or list
station_name0:column0:value0, [station_name1:]column1:value1, [...] or list
        station_nameN: the first entry must include the station_name;
        later entries may omit it, in which case the first entry's
        station_name is propagated to them (the propagation does not
        restart at a later 3-element entry)
columnN: name of geo_location column
valueN: corresponding new value
Returns
-------
dict
Parsed request for update
"""
if request is None:
return None
data = {}
if type(request) == str:
tmp = request.split(',')
data_to_proc = []
for d in tmp:
data_to_proc.append(d.split(':'))
else:
data_to_proc = request
if len(data_to_proc[0]) == 3:
station_name0 = data_to_proc[0][0]
for d in data_to_proc:
if len(d) == 2:
d.insert(0, station_name0)
elif len(d) != 3:
raise ValueError('Invalid format for update request.')
if d[0] in data.keys():
data[d[0]].append(d)
else:
data[d[0]] = [d]
else:
raise ValueError('Invalid parse request - need 3 parameters for at least first one.')
return data
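# Editorial illustration (hypothetical station/columns) of the parsing
# convention documented above:
#
#     format_check_update_request("HH0:datum:WGS84,elevation:1050")
#     -> {'HH0': [['HH0', 'datum', 'WGS84'], ['HH0', 'elevation', '1050']]}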
| bsd-2-clause |
inclement/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
        The color to use when drawing the line. If a list is given, it
        must have length Nlev; if an array is given, it must be of shape
        (Nlev, ...). Either way, one color is provided per level (rgba or
        colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try _color_lev as colormap, except as everything else
try:
f_color_levs = get_colormap(self._color_lev)
except:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
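# Editorial usage sketch (hedged; assumes an existing vispy canvas/scene and a
# placeholder 2D numpy array `my_scalar_field`):
#
#     iso = IsocurveVisual(data=my_scalar_field,
#                          levels=[0.25, 0.5, 0.75],
#                          color_lev=['r', 'g', 'b'])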
| bsd-3-clause |
dl1ksv/gnuradio | gr-filter/examples/reconstruction.py | 5 | 4279 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
from gnuradio.fft import window
import sys
import numpy
try:
from gnuradio import channels
except ImportError:
print("Error: Program requires gr-channels.")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).")
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0 / fs
t = numpy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
window.WIN_BLACKMAN_hARRIS)
print("Filter length: ", len(proto_taps))
# Create a modulated signal
npwr = 0.01
data = numpy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(numpy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in range(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = numpy.array(src_snk.data()[1000:])
sout = numpy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pyplot.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(numpy.sqrt(nchans))
ncols = int(numpy.ceil(float(nchans) / float(nrows)))
f2 = pyplot.figure(2, figsize=(16,12), facecolor='w')
for n in range(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pyplot.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
tosolveit/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
gengliangwang/spark | python/pyspark/pandas/tests/test_dataframe_conversion.py | 15 | 11321 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import string
import tempfile
import unittest
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
from pyspark.testing.sqlutils import SQLTestUtils
class DataFrameConversionTest(PandasOnSparkTestCase, SQLTestUtils, TestUtils):
"""Test cases for "small data" conversion and I/O."""
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix=DataFrameConversionTest.__name__)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
@property
def pdf(self):
return pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3])
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@staticmethod
def strip_all_whitespace(str):
"""A helper function to remove all whitespace from a string."""
return str.translate({ord(c): None for c in string.whitespace})
def test_to_html(self):
expected = self.strip_all_whitespace(
"""
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><th>0</th><td>1</td><td>4</td></tr>
<tr><th>1</th><td>2</td><td>5</td></tr>
<tr><th>3</th><td>3</td><td>6</td></tr>
</tbody>
</table>
"""
)
got = self.strip_all_whitespace(self.psdf.to_html())
self.assert_eq(got, expected)
# with max_rows set
expected = self.strip_all_whitespace(
"""
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><th>0</th><td>1</td><td>4</td></tr>
<tr><th>1</th><td>2</td><td>5</td></tr>
</tbody>
</table>
"""
)
got = self.strip_all_whitespace(self.psdf.to_html(max_rows=2))
self.assert_eq(got, expected)
@staticmethod
def get_excel_dfs(pandas_on_spark_location, pandas_location):
return {
"got": pd.read_excel(pandas_on_spark_location, index_col=0),
"expected": pd.read_excel(pandas_location, index_col=0),
}
@unittest.skip("openpyxl")
def test_to_excel(self):
with self.temp_dir() as dirpath:
pandas_location = dirpath + "/" + "output1.xlsx"
pandas_on_spark_location = dirpath + "/" + "output2.xlsx"
pdf = self.pdf
psdf = self.psdf
psdf.to_excel(pandas_on_spark_location)
pdf.to_excel(pandas_location)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.a.to_excel(pandas_on_spark_location)
pdf.a.to_excel(pandas_location)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
pdf = pd.DataFrame({"a": [1, None, 3], "b": ["one", "two", None]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
psdf.to_excel(pandas_on_spark_location, na_rep="null")
pdf.to_excel(pandas_location, na_rep="null")
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
pdf = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
psdf.to_excel(pandas_on_spark_location, float_format="%.1f")
pdf.to_excel(pandas_location, float_format="%.1f")
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.to_excel(pandas_on_spark_location, header=False)
pdf.to_excel(pandas_location, header=False)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
psdf.to_excel(pandas_on_spark_location, index=False)
pdf.to_excel(pandas_location, index=False)
dataframes = self.get_excel_dfs(pandas_on_spark_location, pandas_location)
self.assert_eq(dataframes["got"], dataframes["expected"])
def test_to_json(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_json(orient="records"), pdf.to_json(orient="records"))
def test_to_json_negative(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaises(NotImplementedError):
psdf.to_json(orient="table")
with self.assertRaises(NotImplementedError):
psdf.to_json(lines=False)
def test_read_json_negative(self):
with self.assertRaises(NotImplementedError):
ps.read_json("invalid", lines=False)
def test_to_json_with_path(self):
pdf = pd.DataFrame({"a": [1], "b": ["a"]})
psdf = ps.DataFrame(pdf)
psdf.to_json(self.tmp_dir, num_files=1)
expected = pdf.to_json(orient="records")
output_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("part-")]
assert len(output_paths) > 0
output_path = "%s/%s" % (self.tmp_dir, output_paths[0])
self.assertEqual("[%s]" % open(output_path).read().strip(), expected)
def test_to_json_with_partition_cols(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_json(self.tmp_dir, partition_cols="b", num_files=1)
partition_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("b=")]
assert len(partition_paths) > 0
for partition_path in partition_paths:
column, value = partition_path.split("=")
expected = pdf[pdf[column] == value].drop("b", axis=1).to_json(orient="records")
output_paths = [
path
for path in os.listdir("%s/%s" % (self.tmp_dir, partition_path))
if path.startswith("part-")
]
assert len(output_paths) > 0
output_path = "%s/%s/%s" % (self.tmp_dir, partition_path, output_paths[0])
with open(output_path) as f:
self.assertEqual("[%s]" % open(output_path).read().strip(), expected)
@unittest.skip("Pyperclip could not find a copy/paste mechanism for Linux.")
def test_to_clipboard(self):
pdf = self.pdf
psdf = self.psdf
self.assert_eq(psdf.to_clipboard(), pdf.to_clipboard())
self.assert_eq(psdf.to_clipboard(excel=False), pdf.to_clipboard(excel=False))
self.assert_eq(
psdf.to_clipboard(sep=";", index=False), pdf.to_clipboard(sep=";", index=False)
)
def test_to_latex(self):
pdf = self.pdf
psdf = self.psdf
self.assert_eq(psdf.to_latex(), pdf.to_latex())
self.assert_eq(psdf.to_latex(col_space=2), pdf.to_latex(col_space=2))
self.assert_eq(psdf.to_latex(header=True), pdf.to_latex(header=True))
self.assert_eq(psdf.to_latex(index=False), pdf.to_latex(index=False))
self.assert_eq(psdf.to_latex(na_rep="-"), pdf.to_latex(na_rep="-"))
self.assert_eq(psdf.to_latex(float_format="%.1f"), pdf.to_latex(float_format="%.1f"))
self.assert_eq(psdf.to_latex(sparsify=False), pdf.to_latex(sparsify=False))
self.assert_eq(psdf.to_latex(index_names=False), pdf.to_latex(index_names=False))
self.assert_eq(psdf.to_latex(bold_rows=True), pdf.to_latex(bold_rows=True))
self.assert_eq(psdf.to_latex(decimal=","), pdf.to_latex(decimal=","))
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(psdf.to_latex(encoding="ascii"), pdf.to_latex(encoding="ascii"))
def test_to_records(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pdf = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_records(), pdf.to_records())
self.assert_eq(psdf.to_records(index=False), pdf.to_records(index=False))
self.assert_eq(psdf.to_records(index_dtypes="<S2"), pdf.to_records(index_dtypes="<S2"))
def test_from_records(self):
# Assert using a dict as input
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3]}), pd.DataFrame.from_records({"A": [1, 2, 3]})
)
# Assert using a list of tuples as input
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)]), pd.DataFrame.from_records([(1, 2), (3, 4)])
)
# Assert using a NumPy array as input
self.assert_eq(ps.DataFrame.from_records(np.eye(3)), pd.DataFrame.from_records(np.eye(3)))
# Asserting using a custom index
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)], index=[2, 3]),
pd.DataFrame.from_records([(1, 2), (3, 4)], index=[2, 3]),
)
# Assert excluding excluding column(s)
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, exclude=["B"]),
pd.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, exclude=["B"]),
)
# Assert limiting to certain column(s)
self.assert_eq(
ps.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, columns=["A"]),
pd.DataFrame.from_records({"A": [1, 2, 3], "B": [1, 2, 3]}, columns=["A"]),
)
# Assert limiting to a number of rows
self.assert_eq(
ps.DataFrame.from_records([(1, 2), (3, 4)], nrows=1),
pd.DataFrame.from_records([(1, 2), (3, 4)], nrows=1),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_conversion import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
cojacoo/testcases_echoRD | gen_test_coR4.py | 1 | 4306 | import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_column
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_g63_nomac',experimental=True)
mc = mcp.mcpick_out(mc,'g63_nomac.pickle')
runname='gen_test_coR4'
idx=int(np.shape(mc.soilgrid)[1]/2)
mc.soilgrid[:,idx-1:idx+1]=13
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart-=340
precTS.tend-=340
precTS.intense=2.*0.063*60./1000.# intensity in m3/s
#use modified routines for binned retention definitions
#mc.part_sizefac=100
mc.gridcellA=abs(mc.mgrid.vertfac*mc.mgrid.latfac)
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
#for column:
total_volume=np.pi*0.5**3
mc.particleV=total_volume/(mc.mgrid.vertgrid[0]*mc.mgrid.latgrid[0]*(2*mc.part_sizefac))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects='column2'
mc.colref=True
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_column(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/X',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([leftover,drained,t,TSstore,i]), handle, protocol=2)
| gpl-3.0 |
bmmalone/pymisc-utils | pyllars/mpl_utils.py | 1 | 32529 | """
This module contains a number of helper functions for matplotlib.
"""
import itertools
import matplotlib
import matplotlib.colors
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import pyllars.utils as utils
import pyllars.validation_utils as validation_utils
import logging
logger = logging.getLogger(__name__)
_VALID_AXIS_VALUES = {
'both',
'x',
'y'
}
_X_AXIS_VALUES = {
'both',
'x'
}
_Y_AXIS_VALUES = {
'both',
'y'
}
def add_fontsizes_to_args(args,
legend_title_fontsize=12,
legend_fontsize=10,
title_fontsize=20,
label_fontsize=15,
ticklabels_fontsize=10):
""" Add reasonable default fontsize values to the arguments
"""
args.legend_title_fontsize = legend_title_fontsize
args.legend_fontsize = legend_fontsize
args.title_fontsize = title_fontsize
args.label_fontsize = label_fontsize
args.ticklabels_fontsize = ticklabels_fontsize
def set_legend_title_fontsize(ax, fontsize):
""" Set the font size of the title of the legend.
Parameters
----------
ax: mpl.Axis
The axis
fontsize: int, or string mpl recognizes
The size of the legend title
Returns
-------
None, but the legend title fontsize is updated
"""
legend = ax.legend_
plt.setp(legend.get_title(),fontsize=fontsize)
def set_legend_fontsize(ax, fontsize):
""" Set the font size of the items of the legend.
Parameters
----------
ax: mpl.Axis
The axis
fontsize: int, or string mpl recognizes
The size of the legend text
Returns
-------
None, but the legend text fontsize is updated
"""
legend = ax.legend_
plt.setp(legend.get_texts(),fontsize=fontsize)
def set_title_fontsize(ax, fontsize):
""" Set the font size of the title of the axis.
Parameters
----------
ax: mpl.Axis
The axis
fontsize: int, or string mpl recognizes
The size of the title font
Returns
-------
None, but the title fontsize is updated
"""
ax.title.set_fontsize(fontsize=fontsize)
def set_label_fontsize(ax, fontsize, axis='both'):
""" Set the font size of the label of the axis.
Parameters
----------
ax: mpl.Axis
The axis
fontsize: int, or string mpl recognizes
The size of the title font
    axis: string
        Should be 'both', 'x', or 'y'
Returns
-------
None, but the respective label fontsizes are updated
"""
if (axis == 'both') or (axis=='x'):
l = ax.xaxis.label
l.set_fontsize(fontsize)
if (axis == 'both') or (axis=='y'):
l = ax.yaxis.label
l.set_fontsize(fontsize)
def center_splines(ax):
""" This function places the splines of the given axis in the center of the
plot. This is useful for things like scatter plots where (0,0) should be
in the center of the plot.
Parameters
----------
ax : mpl.Axis
The axis
Returns
-------
None, but the splines are updated
"""
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_label_coords(0.5, 0)
ax.yaxis.set_label_coords(-0.05, 0.5)
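def _example_center_splines():
    """Editorial sketch (not part of the original module): center the splines
    of a scatter plot so that (0, 0) sits in the middle of the axes. Defined
    but never called."""
    fig, ax = plt.subplots()
    ax.scatter(np.random.randn(100), np.random.randn(100))
    center_splines(ax)
    return fig, ax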
def hide_first_y_tick_label(ax):
""" Hide the first tick label on the y-axis.
Parameters
----------
ax: mpl.Axis
The axis
Returns
-------
None, but the tick label is hidden
"""
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
def hide_tick_labels_by_text(ax, to_remove_x=[], to_remove_y=[]):
""" Hide tick labels which match the given values.
Parameters
----------
ax: mpl.Axis
The axis
to_remove_{x,y}: list-like of strings
The values to remove
"""
xticks = ax.xaxis.get_major_ticks()
num_xticks = len(xticks)
keep_x = [i for i in range(num_xticks) if xticks[i].label1.get_text() not in to_remove_x]
yticks = ax.yaxis.get_major_ticks()
num_yticks = len(yticks)
keep_y = [i for i in range(num_yticks) if yticks[i].label1.get_text() not in to_remove_y]
hide_tick_labels(ax, keep_x=keep_x, keep_y=keep_y)
def hide_tick_labels(ax, keep_x=[], keep_y=[], axis='both'):
""" Hide the tick labels on both axes. Optionally, some can be preserved.
Parameters
----------
ax : mp.Axis
The axis
keep_{x,y} : list-like of ints
The indices of any x-axis ticks to keep. The numbers are passed directly
as indices to the xticks array.
axis : string in {'both', 'x', 'y'}
Axis of the tick labels to hide
Returns
-------
None, but the tick labels of the axis are removed, as specified
"""
validation_utils.validate_in_set(axis, _VALID_AXIS_VALUES, "axis")
if axis in _X_AXIS_VALUES:
xticks = ax.xaxis.get_major_ticks()
for xtick in xticks:
xtick.label1.set_visible(False)
for x in keep_x:
xticks[x].label1.set_visible(True)
if axis in _Y_AXIS_VALUES:
yticks = ax.yaxis.get_major_ticks()
for ytick in yticks:
ytick.label1.set_visible(False)
for y in keep_y:
yticks[y].label1.set_visible(True)
def set_ticklabels_fontsize(ax, fontsize, axis='both', which='major'):
""" Set the font size of the tick labels.
Parameters
----------
ax: mpl.Axis
The axis
fontsize: int, or string mpl recognizes
The size of the ticklabels
axis, which: strings
Values passed to ax.tick_params. Please see the mpl documentation for
more details.
Returns
-------
None, but the ticklabel fontsizes are updated
"""
ax.tick_params(axis=axis, which=which, labelsize=fontsize)
VALID_AXIS_VALUES = {'x', 'y', 'both'}
VALID_WHICH_VALUES = {'major', 'minor', 'both'}
def set_ticklabel_rotation(ax, rotation, axis='x', which='both'):
""" Set the rotation of the tick labels.
Parameters
----------
ax: mpl.Axis
The axis
rotation: int, or a string mpl recognizes
The rotation of the labels
axis: 'x', 'y', 'both'
The axis whose tick labels will be rotated
which: 'major', 'minor', 'both'
Which of the tick labels to affect
Returns
-------
None, but the ticklabels are rotated
"""
if axis not in VALID_AXIS_VALUES:
msg = "{} is not a valid axis value".format(axis)
raise ValueError(msg)
if which not in VALID_WHICH_VALUES:
msg = "{} is not a valid which value".format(which)
raise ValueError(msg)
adjust_xaxis = (axis == 'x') or (axis == 'both')
adjust_yaxis = (axis == 'y') or (axis == 'both')
adjust_major = (which == 'major') or (which == 'both')
adjust_minor = (which == 'minor') or (which == 'both')
if adjust_xaxis:
xticklabels = []
if adjust_major:
xticklabels.extend(ax.xaxis.get_majorticklabels())
if adjust_minor:
xticklabels.extend(ax.xaxis.get_minorticklabels())
plt.setp(xticklabels, rotation=rotation)
if adjust_yaxis:
yticklabels = []
if adjust_major:
yticklabels.extend(ax.yaxis.get_majorticklabels())
if adjust_minor:
yticklabels.extend(ax.yaxis.get_minorticklabels())
plt.setp(yticklabels, rotation=rotation)
def remove_top_and_right_splines(ax):
""" This function removes the spines on the top and right of the axis.
Parameters
----------
ax : mpl.Axis
The axis
Returns
-------
None, but the splines and ticks of the axis are updated
"""
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def plot_roc_curve(tpr, fpr, auc=None, field_names=None, out=None, cmaps=None, alphas=None,
title="Receiver operating characteristic curves", font_size=20, legend_font_size=15,
top_adjustment=0.9, xlabel="False positive rate", ylabel="True positive rate"):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
fig, ax = plt.subplots()
if alphas is None:
alphas = [np.ones(len(tpr[0]))] * len(tpr)
if cmaps is None:
cmaps = [plt.cm.Blues] * len(alphas)
elif len(cmaps) != len(alphas):
msg = "The ROC curve must have the same number of cmaps as alphas"
raise ValueError(msg)
for i in range(len(tpr)):
l = ""
if field_names is not None:
l += field_names[i]
if auc is not None:
l += " "
l += "AUC: {:.2f}".format(auc[i])
color = 'k' # cmap(i/len(tpr))
for j in range(1, len(fpr[i])):
points_y = [tpr[i][j-1], tpr[i][j]]
points_x = [fpr[i][j-1], fpr[i][j]]
# this plots the lines connecting each point
ax.plot( points_x, points_y, color=color, zorder=1 )
ax.scatter(fpr[i], tpr[i], label=l, linewidths=0.1, c=alphas[i], cmap=cmaps[i], zorder=2)
ax.plot([0,1], [0,1])
ax.set_aspect('equal')
ax.set_xlim((0,1))
ax.set_ylim((0,1))
ax.legend(loc='lower right', fontsize=legend_font_size)
if title != None and len(title) > 0:
fig.suptitle(title, fontsize=font_size)
ax.set_xlabel(xlabel, fontsize=font_size)
ax.set_ylabel(ylabel, fontsize=font_size)
fig.tight_layout()
fig.subplots_adjust(top=top_adjustment)
if out is not None:
plt.savefig(out, bbox_inches='tight')
def plot_confusion_matrix(
confusion_matrix,
ax=None,
show_cell_labels=True,
show_colorbar=True,
title="Confusion matrix",
cmap=None,
true_tick_labels = None,
predicted_tick_labels = None,
ylabel="True labels",
xlabel="Predicted labels",
title_font_size=20,
label_font_size=15,
true_tick_rotation=None,
predicted_tick_rotation=None,
out=None):
""" Plot the given confusion matrix
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
# a hack to give cmap a default without importing pyplot for arguments
    if cmap is None:
cmap = plt.cm.Blues
mappable = ax.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)
if show_colorbar:
fig.colorbar(mappable)
ax.grid(False)
true_tick_marks = np.arange(confusion_matrix.shape[0])
ax.set_ylabel(ylabel, fontsize=label_font_size)
ax.set_yticks(true_tick_marks)
if true_tick_labels is None:
true_tick_labels = list(true_tick_marks)
ax.set_yticklabels(
true_tick_labels,
fontsize=label_font_size,
rotation=true_tick_rotation
)
predicted_tick_marks = np.arange(confusion_matrix.shape[1])
ax.set_xlabel(xlabel, fontsize=label_font_size)
ax.set_xticks(predicted_tick_marks)
if predicted_tick_labels is None:
predicted_tick_labels = list(predicted_tick_marks)
ax.set_xticklabels(
predicted_tick_labels,
fontsize=label_font_size,
rotation=predicted_tick_rotation
)
if show_cell_labels:
# the choice of color is based on this SO thread:
# https://stackoverflow.com/questions/2509443
color_threshold = 125
s = confusion_matrix.shape
it = itertools.product(range(s[0]), range(s[1]))
for i,j in it:
val = confusion_matrix[i,j]
cell_color = cmap(mappable.norm(val))
# see the SO thread mentioned above
color_intensity = (
(255*cell_color[0] * 299) +
(255*cell_color[1] * 587) +
(255*cell_color[2] * 114)
) / 1000
font_color = "white"
if color_intensity > color_threshold:
font_color = "black"
text = val
ax.text(j, i, text, ha='center', va='center', color=font_color,
size=label_font_size)
ax.set_title(title, fontsize=title_font_size)
fig.tight_layout()
if out is not None:
plt.savefig(out, bbox_inches='tight')
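# Illustrative sketch (not called anywhere in this module): a 2x2 confusion
# matrix with made-up counts and hypothetical class names. numpy, pyplot and
# itertools are assumed to be importable, as the helper above already relies
# on them.
def _demo_plot_confusion_matrix():
    import numpy as np
    cm = np.array([[50, 10],
        [5, 35]])
    plot_confusion_matrix(cm,
        true_tick_labels=["negative", "positive"],
        predicted_tick_labels=["negative", "positive"],
        title="Demo confusion matrix")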
def plot_venn_diagram(sets, ax=None, set_labels=None, weighted=False, use_sci_notation=False,
labels_fontsize=14, counts_fontsize=12, sci_notation_limit=999):
""" This function is a wrapper around matplotlib_venn. It most just makes
setting the fonts and and label formatting a bit easier.
Args:
sets: either a dictionary, a list-like of two sets or a list-like of
three sets. If a dictionary, it must follow the conventions of
matplotlib_venn. If a dictionary is given, the number of sets
will be guessed based on the length of a random key.
ax (mpl.axis): an axis for drawing
set_labels (list of strings): the label for each set. The order
of the labels must match the order of the sets
weighted (bool): whether to draw a weighted or unweighted diagram
use_sci_notation (bool): whether to convert numbers to scientific
notation
sci_notation_limit (float): the maximum number to show before
switching to scientific notation
labels_fontsize, counts_fontsize (int): the respective fontsizes
Returns:
matplotlib_venn.VennDiagram: the diagram
Imports:
matplotlib_venn
"""
import matplotlib_venn
key_len = 0
if isinstance(sets, dict):
random_key = list(sets.keys())[0]
key_len = len(random_key)
if (len(sets) == 2) or (key_len == 2):
if weighted:
v = matplotlib_venn.venn2(sets, ax=ax, set_labels=set_labels)
else:
v = matplotlib_venn.venn2_unweighted(sets, ax=ax, set_labels=set_labels)
elif (len(sets) == 3) or (key_len == 3):
if weighted:
v = matplotlib_venn.venn3(sets, ax=ax, set_labels=set_labels)
else:
v = matplotlib_venn.venn3_unweighted(sets, ax=ax, set_labels=set_labels)
else:
msg = "Only two or three sets are supported"
raise ValueError(msg)
for l in v.set_labels:
if l is not None:
l.set_fontsize(labels_fontsize)
for l in v.subset_labels:
if l is None:
continue
l.set_fontsize(counts_fontsize)
if use_sci_notation:
val = int(l.get_text())
if val > sci_notation_limit:
val = "{:.0E}".format(val)
l.set_text(val)
return v
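# Illustrative sketch (not called anywhere in this module): two made-up sets
# passed as plain python sets; matplotlib_venn computes the overlaps itself.
def _demo_plot_venn_diagram():
    genes_a = {"gene1", "gene2", "gene3", "gene4"}
    genes_b = {"gene3", "gene4", "gene5"}
    return plot_venn_diagram([genes_a, genes_b],
        set_labels=["experiment A", "experiment B"], weighted=False)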
def create_simple_bar_chart(ax,
bars,
labels=None,
                            colors=None, # this will not accept an (rgba) list-like specification
xticklabels='default',
xticklabels_rotation='vertical',
xlabel=None,
spacing=0,
ymin=None,
ymax=None,
ylabel=None,
use_log_scale=False,
hide_first_ytick=True,
show_legend=False,
title=None,
fontsize=12,
label_fontsize=12,
legend_fontsize=12,
title_fontsize=12,
tick_offset=0.5
):
mpl_bars = []
# first, handle the bars
# TODO: check that the bar arrays are all the same length
xticks = np.arange(len(bars[0]))
width = 1 - 2*spacing
width /= len(bars)
# figure out what to do with "colors"
if colors is None:
colors = plt.cm.Blues
if isinstance(colors, matplotlib.colors.Colormap):
# then use "num_bars" equi-distant colors
ls = np.linspace(0, 1, len(bars))
color_vals = [colors(c) for c in ls]
colors = color_vals
elif utils.is_sequence(colors):
# make sure this is the correct size
if len(colors) != len(bars):
msg = ("The number of colors ({}) and the number of bars({}) does "
"not match.".format(len(colors), len(bars)))
raise ValueError(msg)
else:
# we assume color is a scalar, and we will use the same color
# for all bars
colors = [colors] * len(bars)
if labels is None:
labels = np.full(len(bars), "", dtype=object)
for i, bar in enumerate(bars):
xpos = xticks + i*width
if len(bar) < len(xpos):
xpos = xpos[:len(bar)]
mpl_bar = ax.bar(xpos, bar, width=width, color=colors[i], label=labels[i])
mpl_bars.append(mpl_bar)
# now the x-axis
if isinstance(xticklabels, str):
if xticklabels == "default":
xticklabels = xticks
tick_offset = tick_offset - spacing
if xticklabels is not None:
ax.set_xticks(xticks+tick_offset)
ax.set_xticklabels(xticklabels, fontsize=fontsize,
rotation=xticklabels_rotation)
else:
ax.tick_params(axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off')
ax.set_xlim((-width, len(xticks)+width/2))
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=label_fontsize)
# and the y-axis
if use_log_scale:
ax.set_yscale('log')
if ymin is None:
ymin = 0
if use_log_scale:
ymin=1
if ymax is None:
ymax = 2*max(max(x) for x in bars)
ax.set_ylim((ymin, ymax))
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=label_fontsize)
if hide_first_ytick:
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
# and the legend
if show_legend:
ax.legend(fontsize=legend_fontsize)
# and the title
if title is not None:
ax.set_title(title, fontsize=title_fontsize)
return ax
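# Illustrative sketch (not called anywhere in this module): two hypothetical
# series plotted side by side; with colors=None the bars fall back to
# equally-spaced values from plt.cm.Blues, as implemented above.
def _demo_create_simple_bar_chart():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    bars = [[10, 20, 15], [12, 18, 22]]
    create_simple_bar_chart(ax, bars,
        labels=["before", "after"],
        xticklabels=["sample 1", "sample 2", "sample 3"],
        ylabel="count",
        show_legend=True)
    return fig, ax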
def get_diff_counts(data_np):
""" This function extracts the differential counts necessary for visualization
with stacked_bar_graph. It assumes the counts for each bar are given as a
separate row in the numpy 2-d array. Within the rows, the counts are ordered
in ascending order. That is, the first column contains the smallest count, the
second column contains the next-smallest count, etc.
    For example, if the columns represent some sort of filtering approach, then the
last column would contain the unfiltered count, the next-to-last column
would give the count after the first round of filtering, etc.
"""
# add an extra column so the diff counts will work
zeros = np.zeros((data_np.shape[0], 1))
data_np = np.append(zeros, data_np, axis=1)
# get the diffs so the stacks work correctly
diff = np.diff(data_np)
return diff
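# Worked example (hypothetical numbers): each row holds cumulative counts in
# ascending order, e.g. counts surviving successive filtering steps. The
# helper above turns them into per-level increments suitable for stacking.
#
#     counts = np.array([[10, 25, 40],
#                        [ 5, 15, 45]])
#     get_diff_counts(counts)
#     # -> [[10., 15., 15.],
#     #     [ 5., 10., 30.]]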
def create_stacked_bar_graph(
ax, # axes to plot onto
data, # data to plot
colors=plt.cm.Blues, # color map for each level or list of colors
x_tick_labels = None, # bar specific labels
stack_labels=None, # the text for the legend
y_ticks = None, # information used for making y ticks
y_tick_labels=None,
hide_first_ytick=True,
edge_colors=None, # colors for edges
showFirst=-1, # only plot the first <showFirst> bars
scale=False, # scale bars to same height
widths=None, # set widths for each bar
heights=None, # set heights for each bar
                             y_title=None, # label for y axis
                             x_title=None, # label for x axis
gap=0., # gap between bars
end_gaps=False, # allow gaps at end of bar chart (only used if gaps != 0.)
show_legend=True, # whether to show the legend
legend_loc="best", # if using a legend, its location
legend_bbox_to_anchor=None, # for the legend
legend_ncol=-1, # for the legend
log=False, # whether to use a log scale
font_size=8, # the font size to use for the tick labels
label_font_size=12, # the font size for the labels
legend_font_size=8
):
""" Create a stacked bar plot with the given characteristics.
This code is adapted from code by Michael Imelfort.
"""
#------------------------------------------------------------------------------
    # fix up the data
# make sure this makes sense
if showFirst != -1:
showFirst = np.min([showFirst, np.shape(data)[0]])
data_copy = np.copy(data[:showFirst]).transpose().astype('float')
data_shape = np.shape(data_copy)
if heights is not None:
heights = heights[:showFirst]
if widths is not None:
widths = widths[:showFirst]
showFirst = -1
else:
data_copy = np.copy(data).transpose()
data_shape = np.shape(data_copy)
# determine the number of bars and corresponding levels from the shape of the data
num_bars = data_shape[1]
levels = data_shape[0]
if widths is None:
widths = np.array([1] * num_bars)
x = np.arange(num_bars)
else:
if not utils.is_sequence(widths):
widths = np.full(num_bars, widths)
print("widths: ", widths)
x = [0]
for i in range(1, len(widths)):
#x.append(x[i-1] + (widths[i-1] + widths[i])/2)
x.append(x[i-1] + widths[i])
# stack the data --
# replace the value in each level by the cumulative sum of all preceding levels
data_stack = np.reshape([float(i) for i in np.ravel(np.cumsum(data_copy, axis=0))], data_shape)
    # scale the data if needed
if scale:
data_copy /= data_stack[levels-1]
data_stack /= data_stack[levels-1]
if heights is not None:
print("WARNING: setting scale and heights does not make sense.")
heights = None
elif heights is not None:
data_copy /= data_stack[levels-1]
data_stack /= data_stack[levels-1]
for i in np.arange(num_bars):
data_copy[:,i] *= heights[i]
data_stack[:,i] *= heights[i]
# plot
# if we were given a color map, convert it to a list of colors
if isinstance(colors, matplotlib.colors.Colormap):
colors = [ colors(i/levels) for i in range(levels)]
if edge_colors is None:
edge_colors = colors
elif not utils.is_sequence(edge_colors):
edge_colors = np.full(levels, edge_colors, dtype=object)
    elif len(edge_colors) != levels:
msg = "The number of edge_colors must match the number of stacks."
raise ValueError(msg)
    # take care of gaps
gapd_widths = [i - gap for i in widths]
if stack_labels is None:
stack_labels = np.full(levels, '', dtype=object)
# bars
bars = []
bar = ax.bar(x,
data_stack[0],
color=colors[0],
edgecolor=edge_colors[0],
width=gapd_widths,
linewidth=0.5,
align='center',
label=stack_labels[0],
log=log
)
bars.append(bar)
for i in np.arange(1,levels):
bar = ax.bar(x,
data_copy[i],
bottom=data_stack[i-1],
color=colors[i],
edgecolor=edge_colors[i],
width=gapd_widths,
linewidth=0.5,
align='center',
label=stack_labels[i],
log=log
)
bars.append(bar)
# borders
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
#ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# make ticks if necessary
if y_ticks is not None:
ax.set_yticks(y_ticks)
if y_tick_labels is not None:
ax.set_yticklabels(y_tick_labels, fontsize=font_size)
if hide_first_ytick:
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
else:
ax.tick_params(
axis='y',
which='both',
left='off',
right='off',
labelright='off',
labelleft='off')
if x_tick_labels is not None:
ax.tick_params(axis='x', which='both', labelsize=font_size, direction="out")
ax.xaxis.tick_bottom()
ax.set_xticks(x)
ax.set_xticklabels(x_tick_labels, rotation='vertical')
else:
ax.set_xticks([])
ax.set_xticklabels([])
# limits
if end_gaps:
ax.set_xlim(-1.*widths[0]/2. - gap/2., np.sum(widths)-widths[0]/2. + gap/2.)
else:
ax.set_xlim(-1.*widths[0]/2. + gap/2., np.sum(widths)-widths[0]/2. - gap/2.)
ymin = 0
if log:
ymin = 1
# labels
if x_title is not None:
ax.set_xlabel(x_title, fontsize=label_font_size)
if y_title is not None:
ax.set_ylabel(y_title, fontsize=label_font_size)
# legend
if show_legend:
if legend_ncol < 1:
legend_ncol = len(stack_labels)
lgd = ax.legend(loc=legend_loc, bbox_to_anchor=legend_bbox_to_anchor, ncol=legend_ncol,
fontsize=legend_font_size)
return bars
def plot_simple_scatter(
x, y,
ax=None,
equal_aspect=True,
set_lim=True,
show_y_x_line=True,
xy_line_kwargs={},
**kwargs):
""" Plot a simple scatter plot of x vs. y on `ax`
    If `ax` is not given, a new figure and axis will be created.
See the matplotlib documentation for more keyword arguments and details:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html
Parameters
----------
x,y : array-like of numbers
The values to plot
ax : mpl.Axis
An axis for plotting. If this is not given, then a figure and axis will
be created.
equal_aspect : bool
Whether to set the aspect of the axis to `equal`
set_lim : bool
Whether to automatically set the min and max axis limits
show_y_x_line : bool
Whether to draw the y=x line. This will look weird if `set_lim` is False.
xy_line_kwargs : dict
        keyword arguments for plotting the y=x line, if it is plotted
**kwargs : <key>=<value> pairs
Additional keyword arguments to pass to the plot function. Some useful
keyword arguments are:
* `label` : the label for a legend
* `marker` : https://matplotlib.org/examples/lines_bars_and_markers/marker_reference.html
Returns
-------
fig, ax : mpl.Figure and mpl.Axis
The figure and axis on which the scatter points were plotted
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
ax.scatter(x,y, **kwargs)
min_val = min(min(x), min(y))
max_val = max(max(x), max(y))
lim = (min_val, max_val)
if set_lim:
ax.set_xlim(lim)
ax.set_ylim(lim)
if show_y_x_line:
ax.plot(lim, lim, **xy_line_kwargs)
if equal_aspect:
ax.set_aspect('equal')
return fig, ax
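# Illustrative sketch (not called anywhere in this module): scatter of noisy
# "predicted" values against made-up "true" values, with the default y=x
# reference line drawn by the helper above.
def _demo_plot_simple_scatter():
    import numpy as np
    true_vals = np.linspace(0, 1, 25)
    predicted = true_vals + np.random.normal(scale=0.05, size=true_vals.size)
    return plot_simple_scatter(true_vals, predicted, label="demo points")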
def plot_trend_line(ax, x, intercept, slope, power, **kwargs):
""" Draw the trend line implied by the given coefficients.
Parameters
----------
ax : mpl.Axis
The axis on which the line will be drawn
x : list of floats
The points at which the line will be drawn
intercept, slope, power : floats
The coefficients of the trend line
**kwargs : <key>=<value> pairs
Keyword arguments to pass to the ax.plot function (color, etc.)
Returns
-------
None, but the line will be drawn on the axis
"""
x = np.sort(x)
y = power * x ** 2 + slope * x + intercept
#Plot trendline
ax.plot(x, y, **kwargs)
def draw_rectangle(ax, base_x, base_y, width, height, center_x=False,
center_y=False, **kwargs):
""" Draw a rectangle at the given x and y coordinates. Optionally, these
can be adjusted such that they are the respective centers rather than edge
values.
Parameters
----------
ax: mpl.Axis
The axis on which the rectangle will be drawn
base_{x,y}: number
The base x and y coordinates
width, height: number
The width (change in x) and height (change in y) of the rectangle
center_{x,y}: bool
Whether to adjust the x and y coordinates such that they become the
center rather than lower left. In particular, if center_x is True, then
base_x will be shifted left by width/2; likewise, if center_y is True,
then base_y will be shifted down by height/2.
kwargs: key=value pairs
Additional keywords are passed to the patches.Rectangle constructor
    """
y_offset = 0
if center_y:
y_offset = height/2
x_offset = 0
if center_x:
x_offset = width/2
y = base_y - y_offset
x = base_x - x_offset
ax.add_patch(patches.Rectangle((x,y), width, height, **kwargs))
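# Illustrative sketch (not called anywhere in this module): one rectangle
# anchored at its lower-left corner and one centered on the same point, to
# show the effect of the center_{x,y} flags. All coordinates are arbitrary.
def _demo_draw_rectangle():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    draw_rectangle(ax, 0.5, 0.5, 0.2, 0.1, facecolor='blue', alpha=0.5)
    draw_rectangle(ax, 0.5, 0.5, 0.2, 0.1, center_x=True, center_y=True,
        facecolor='red', alpha=0.5)
    return fig, ax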
def plot_sorted_values(values, ymin=None, ymax=None, ax=None, scale_x=False, **kwargs):
""" Sort `values` and plot them
Parameters
----------
values : list-like of numbers
The values to plot
y{min,max} : floats
The min and max values for the y-axis. If not given, then these
default to the minimum and maximum values in the list.
scale_x : bool
If True, then the `x` values will be equally-spaced between 0 and 1.
Otherwise, they will be the values 0 to len(values)
ax : mpl.Axis
An axis for plotting. If this is not given, then a figure and axis will
be created.
**kwargs : <key>=<value> pairs
Additional keyword arguments to pass to the plot function. Some useful
keyword arguments are:
* `label` : the label for a legend
* `lw` : the line width
* `ls` : https://matplotlib.org/gallery/lines_bars_and_markers/line_styles_reference.html
* `marker` : https://matplotlib.org/examples/lines_bars_and_markers/marker_reference.html
Returns
-------
fig : mpl.Figure
The Figure associated with `ax`, or a new Figure
ax : mpl.Axis
Either `ax` or a new Axis
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
y = np.sort(values)
if scale_x:
x = np.linspace(0,1, len(y))
else:
x = np.arange(len(y))
ax.plot(x,y, **kwargs)
if ymin is None:
ymin = y[0]
if ymax is None:
ymax = y[-1]
ax.set_ylim((ymin, ymax))
ax.set_xlim((0, len(y)))
return fig, ax
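# Illustrative sketch (not called anywhere in this module): sorted view of a
# made-up heavy-tailed sample, with the x axis rescaled to [0, 1].
def _demo_plot_sorted_values():
    import numpy as np
    values = np.random.lognormal(mean=0.0, sigma=1.0, size=200)
    return plot_sorted_values(values, scale_x=True, label="demo sample")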
| mit |
gaubert/boxme | src/sandbox/test_dynamic_draw.py | 1 | 1219 | '''
Created on Aug 28, 2014
@author: gaubert
'''
import numpy as np
import time
from matplotlib import pyplot as plt
plt.ion() # set plot to animated
ydata = [0] * 50
ax1 = plt.axes()
# make plot
line, = plt.plot(ydata)
plt.ylim([10,40])
# start data collection
all_data = ["0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10","0.0","1.1","2.2","3.3","4.4","5.5","6.6","7.7","8.8","9.9","10.10"]
while len(all_data) > 0:
# port and strip line endings
data = all_data.pop(0)
if len(data.split(".")) == 2:
ymin = float(min(ydata))-10
ymax = float(max(ydata))+10
plt.ylim([ymin,ymax])
        ydata.append(float(data))
del ydata[0]
line.set_xdata(np.arange(len(ydata)))
line.set_ydata(ydata) # update the data
    plt.draw() # update the plot
print("sleep for 5s")
time.sleep(0.05)
| gpl-3.0 |
mjudsp/Tsallis | examples/datasets/plot_iris_dataset.py | 35 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
vvoelz/ratespec | RateSpecTools.py | 1 | 6597 | #!/usr/bin/env python
import os, sys, glob
import scipy
from scipy.linalg import pinv
import numpy as np
import matplotlib
from pylab import *
def rangeLog(min, max, n):
"""Return an array of n log-spaces values from min to max.
NOTE: All values must be greater than 0"""
logmin, logmax = log(min), log(max)
return np.exp( np.arange( logmin, logmax, (logmax-logmin)/float(n) ) )
def rangeLin(min, max, n):
"""Return an array of n linear-spaced values from min to max."""
return np.arange( min, max, (max-min)/n )
def Xmatrix(k,t,w, standardizeData=True):
"""Return X matrix Xij = [ exp(-k_j*t_i); I*w^(1/2) ] from arrays k and t. """
K, N = len(k), len(t)
X = np.zeros( (N+K,K) )
for i in range(N):
for j in range(K):
X[i,j] = np.exp(-1.0*k[j]*t[i])
for i in range(K):
X[i+len(t),i] = w**0.5
Xmean = (X[0:N,:]).mean(axis=0)
if standardizeData:
for j in range(K):
X[0:N,j] = X[0:N,j] - Xmean[j]
return X, Xmean
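# Layout sketch for the (N+K) x K matrix returned by Xmatrix, with N = len(t)
# time points, K = len(k) trial rates and w the Tikhonov weight:
#
#     X = [ exp(-k_1 t_1) ... exp(-k_K t_1) ]   <- N data rows
#         [      ...              ...       ]
#         [ exp(-k_1 t_N) ... exp(-k_K t_N) ]
#         [   sqrt(w)      0       ...      ]   <- K regularization rows
#         [     0        sqrt(w)   ...      ]
#
# With standardizeData=True, each of the first N rows of a column also has
# that column's mean (returned as Xmean) subtracted.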
def Xsubmatrix(k,t, standardizeData=True):
"""Return X matrix Xij = [ exp(-k_j*t_i)] from arrays k and t. """
K, N = len(k), len(t)
X = np.zeros( (N,K) )
for i in range(N):
for j in range(K):
X[i,j] = np.exp(-1.0*k[j]*t[i])
Xmean = X.mean(axis=0)
if standardizeData:
for j in range(K):
X[:,j] = X[:,j] - Xmean[j]
return X, Xmean
def SumSpectra(A, Rates, Times, offset=0.0):
"""Return the sum of the exponential relaxations.
If the data is standardized, an offset constant zero rate must be provided."""
#print '*** in SumSpectra: ***'
result = np.zeros( Times.shape )
for i in range(len(Rates)):
#print '***', Rates[i]
result += A[i]*np.exp( -1.0*Rates[i]*Times)
return result
def testData(nTimes = 1000, taus = [1.0e-6, 1.0e-4, 5.0e-3], amps = [0.3, 0.3, 0.4], sigma = 0.05, linear=False):
"""
nTimes = 727 # same number as W55F dataset
taus = [1.0e-6, 1.0e-4, 5.0e-3] # timescales for a test data curve
    amps = [0.3, 0.3, 0.4] # amplitudes of each relaxation
sigma = 0.05 # add artifical noise to the data
"""
if linear:
Times = rangeLin(1.0e-7, 1.0e-3, nTimes) # in seconds
else:
Times = rangeLog(1.0e-9, 1.0, nTimes) # in seconds
Data = np.zeros( Times.shape )
for i in range(len(taus)):
Data += amps[i]*np.exp(-1.*Times/taus[i])
Data += sigma*np.random.randn( len(Data) )
return Times, Data
def testStretchedExpData(nTimes = 1000, beta = 0.5, sigma = 0.05, linear=False):
"""
nTimes = 727 # same number as W55F dataset
    beta = the stretching exponent (should be between 0 and 1)
sigma = 0.05 # add artifical noise to the data
"""
if linear:
Times = rangeLin(1.0e-3, 1.0e+3, nTimes) # in seconds
else:
Times = rangeLog(1.0e-3, 1.0e+3, nTimes) # in seconds
Data = np.exp(-1.*(Times**beta))
Data += sigma*np.random.randn( len(Data) )
return Times, Data
def fitRateSpectrum(Times, Data, Rates, w, Lnorm='ridge', standardizeData=True, CalcNdof=False, rho=0.5):
"""Using pseudo-inverse, with Tikhonov regularization (w parameter) to solve the inverse lapace tranform.
Returns coefficients A_k, residual sum of squares (rss), and number of degrees of freedom, for each relaxation rate.
"""
if Lnorm == 'lasso':
# Use L1-norm Lasso regression
try:
from scikits.learn.linear_model import Lasso
except:
print 'Error: could NOT import Lasso from scikits.learn.linear_model. Using L2 norm (ridge).'
Lnorm = 'ridge'
if Lnorm == 'enet':
# Use L1-L2-mixture norm Lasso regression
try:
from scikits.learn.linear_model import ElasticNet
except:
print 'Error: could NOT import ElasticNet from scikits.learn.linear_model. Using L2 norm (ridge).'
Lnorm = 'ridge'
if Lnorm == 'lasso':
lasso = Lasso(alpha = w, fit_intercept=False) # assume the data is already "centered" -- i.e. no zero rate
X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)
#print 'X.shape', X.shape, 'Data.shape', Data.shape
lasso.fit(X, Data, max_iter=1e6, tol=1e-7)
A = lasso.coef_
# Compute "residual sum of squares" (note loss function is different for L1-norm)
y_pred_lasso = lasso.predict(X)
diff = y_pred_lasso - Data
elif Lnorm == 'enet':
# NOTE: The convention for rho is backwards in scikits.learn, instead of rho we must send (1-rho)
enet = ElasticNet(alpha = w, rho=(1.-rho), fit_intercept=False) # assume the data is already "centered" -- i.e. no zero rate
X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)
#print 'X.shape', X.shape, 'Data.shape', Data.shape
#enet.fit(X, Data, max_iter=1e6, tol=1e-7)
enet.fit(X, Data, max_iter=1e6, tol=1e-3) # for testing
A = enet.coef_
# Compute "residual sum of squares" (note loss function is different for L1-norm)
y_pred_enet = enet.predict(X)
diff = y_pred_enet - Data
elif Lnorm == 'ridge':
X, Xmean = Xmatrix(Rates, Times, w, standardizeData=standardizeData )
Xinv = linalg.pinv(X)
y = np.array( Data.tolist() + [0. for k in Rates] )
if standardizeData:
            y = y - y.mean()
A = np.dot(Xinv, y)
# Compute "residual sum of squares" (note loss function is different for L1-norm)
diff = SumSpectra(A, Rates, Times) - Data
rss = np.dot(diff,diff) # Residual sum of squares
if CalcNdof:
Xsub, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)
XT = np.transpose(Xsub)
I_XT = np.eye(XT.shape[0])
I_X = np.eye(Xsub.shape[0])
Xtemp = np.dot(Xsub, np.linalg.inv(np.dot(XT,Xsub) + w*I_XT))
ndof = np.trace(I_X - np.dot(Xtemp,XT))
else:
ndof = None
return A, rss, ndof
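# Illustrative sketch (not called anywhere in this module): build synthetic
# multi-exponential data with testData(), lay out a log-spaced grid of trial
# rates, and recover the amplitude spectrum with the default L2 (ridge)
# penalty. The rate-grid bounds and the regularization weight w below are
# arbitrary choices made only for demonstration.
def _demo_fitRateSpectrum():
    Times, Data = testData(nTimes=500, sigma=0.01)
    Rates = rangeLog(1.0e2, 1.0e9, 100) # trial rates in 1/s, spanning the test taus
    A, rss, ndof = fitRateSpectrum(Times, Data, Rates, w=0.1, Lnorm='ridge',
        CalcNdof=True)
    return Rates, A, rss, ndof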
def scaleValues(values):
"""Scale a numpy array of values so that (min, max) = (0,1)."""
values = values - values.min()
return values/values.max()
def scaleValuesWithInfo(values):
"""Scale a numpy array of values so that (min, max) = (0,1).
Returns:
scaled values
Scaleby
Shiftby -- i.e. to get back the original values, first multiply by Scaleby, then add Shiftby
"""
Shiftby = values.min()
values = values - Shiftby
Scaleby = values.max()
return values/Scaleby, Scaleby, Shiftby
| mit |
harish2rb/pyGeoNet | pygeonet_V3/pygeonet_network_delineation.py | 1 | 18420 | import pandas as pd
import numpy as np
import os
import numpy.ma as npma
import grass.script as g
import grass.script.setup as gsetup
import prepare_pygeonet_defaults as defaults
import prepare_pygeonet_inputs as Parameters
import pygeonet_plot as pyg_plt
import pygeonet_vectorio as pyg_vio
# Compute discrete geodesics
def compute_discrete_geodesic(geodesicDistanceArray, skeletonEndPoint,
doTrueGradientDescent, num):
# Extract a discrete geodesic path in 2D
# D = geodesic distance matrix
# x = channel head or start point
# path = variable that stores the pixel values of the stream line.
skeletonEndPoint = skeletonEndPoint[:]
# print skeletonEndPoint[:]
streamPathPixelList = skeletonEndPoint[:]
# print 'skeletonEndPoint',skeletonEndPoint
# Creating the 8 cell neighbor moves
tempArrayDxMoves = [1, -1, 0, 0, 1, -1, 1, -1]
tempArrayDyMoves = [0, 0, 1, -1, 1, -1, -1, 1]
tempArray = [tempArrayDxMoves, tempArrayDyMoves]
# Get the geodesic value for the channel head
channelHeadGeodesicDistance = geodesicDistanceArray[skeletonEndPoint[0], skeletonEndPoint[1]]
# print 'channelHeadGeodesicDistance',channelHeadGeodesicDistance
# Get the size of the geodesic distance
geodesicDistanceArraySize = geodesicDistanceArray.shape
# print geodesicDistanceArraySize
    # While we find a geodesic distance less than the previous value
while True:
cardinalDxMoves = [1, -1, 0, 0]
cardinalDyMoves = [0, 0, 1, -1]
diagonalDxMoves = [1, -1, 1, -1]
diagonalDyMoves = [1, -1, -1, 1]
cardinalAllPossibleMoves = [cardinalDxMoves, cardinalDyMoves]
diagonalAllPossibleMoves = [diagonalDxMoves, diagonalDyMoves]
tempStreamPathPixelList = streamPathPixelList[:, -1]
# print tempStreamPathPixelList
tempStreamPathPixelListA = np.array([[tempStreamPathPixelList[0]], \
[tempStreamPathPixelList[1]]])
cardinalSkeletonEndPoint = np.repeat(tempStreamPathPixelListA, 4, axis=1) + \
cardinalAllPossibleMoves
diagonalSkeletonEndPoint = np.repeat(tempStreamPathPixelListA, 4, axis=1) + \
diagonalAllPossibleMoves
r1 = cardinalSkeletonEndPoint.tolist()[0]
r2 = cardinalSkeletonEndPoint.tolist()[1]
r3 = diagonalSkeletonEndPoint.tolist()[0]
r4 = diagonalSkeletonEndPoint.tolist()[1]
neighborPixelSkeletonEndPointList = np.array([r1 + r3, r2 + r4])
r5 = neighborPixelSkeletonEndPointList.tolist()[0]
r6 = neighborPixelSkeletonEndPointList.tolist()[1]
# Get the indices which are not on boundary
cardinalAllowedIndex0 = np.array([cardinalSkeletonEndPoint[0, :] > 0] and \
[cardinalSkeletonEndPoint[0, :] <
geodesicDistanceArraySize[0]])
cardinalAllowedIndex1 = np.array([cardinalSkeletonEndPoint[1, :] > 0] and \
[cardinalSkeletonEndPoint[1, :] <
geodesicDistanceArraySize[1]])
cardinalAllowedIndex = cardinalAllowedIndex0 * cardinalAllowedIndex1
diagonalAllowedIndex0 = np.array([diagonalSkeletonEndPoint[0, :] > 0] and \
[diagonalSkeletonEndPoint[0, :] <
geodesicDistanceArraySize[0]])
diagonalAllowedIndex1 = np.array([diagonalSkeletonEndPoint[1, :] > 0] and \
[diagonalSkeletonEndPoint[1, :] <
geodesicDistanceArraySize[1]])
diagonalAllowedIndex = diagonalAllowedIndex0 * diagonalAllowedIndex1
allAllowedIndex0 = np.array([neighborPixelSkeletonEndPointList[0, :] > 0] and \
[neighborPixelSkeletonEndPointList[0, :] <
geodesicDistanceArraySize[0]])
allAllowedIndex1 = np.array([neighborPixelSkeletonEndPointList[1, :] > 0] and \
[neighborPixelSkeletonEndPointList[1, :] <
geodesicDistanceArraySize[1]])
allAllowedIndex = allAllowedIndex0 * allAllowedIndex1
        # Now remove neighbors that are outside the array bounds
# build the true false array
tfCarray = np.array([cardinalAllowedIndex[0], cardinalAllowedIndex[0]])
tfCarrayMask = np.zeros((tfCarray.shape))
tfCarrayMask[tfCarray == False] = 1
popinfC = np.where(tfCarray[0, :] == False)
# print popinfC
tfDarray = np.array([diagonalAllowedIndex[0], diagonalAllowedIndex[0]])
tfDarrayMask = np.zeros((tfDarray.shape))
tfDarrayMask[tfDarray == False] = 1
popinfD = np.where(tfDarray[0, :] == False)
# print popinfD
tfAarray = np.array([allAllowedIndex[0], allAllowedIndex[0]])
tfAarrayMask = np.zeros((tfAarray.shape))
tfAarrayMask[tfAarray == False] = 1
popinfA = np.where(tfAarray[0, :] == False)
# print popinfA
# Now remove the false indices from our neighborhood matrix
# Now arrange the arrays above
cardinalSkeletonEndPointAllowed = npma.masked_array(cardinalSkeletonEndPoint, \
mask=tfCarrayMask)
diagonalSkeletonEndPointAllowed = npma.masked_array(diagonalSkeletonEndPoint, \
mask=tfDarrayMask)
neighborPixelSkeletonEndPointListAllowed = npma.masked_array(
neighborPixelSkeletonEndPointList, \
mask=tfAarrayMask)
rw1 = neighborPixelSkeletonEndPointListAllowed[0, :]
rw2 = neighborPixelSkeletonEndPointListAllowed[1, :]
rw3 = cardinalSkeletonEndPointAllowed[0, :]
rw4 = cardinalSkeletonEndPointAllowed[1, :]
rw5 = diagonalSkeletonEndPointAllowed[0, :]
rw6 = diagonalSkeletonEndPointAllowed[1, :]
# Get the minimum value of geodesic distance in the 8 cell neighbor
# Get the values of D(I) and adjust values for diagonal elements
try:
allGeodesicDistanceList = np.array(geodesicDistanceArray[rw1[~rw1.mask], \
rw2[~rw2.mask]])
# new line
cardinalPixelGeodesicDistanceList = np.array(geodesicDistanceArray[rw3[~rw3.mask], \
rw4[~rw4.mask]])
diagonalPixelGeodesicDistanceList = np.array(geodesicDistanceArray[rw5[~rw5.mask], \
rw6[~rw6.mask]])
except:
print(neighborPixelSkeletonEndPointList)
print(allAllowedIndex)
print(allGeodesicDistanceList)
print(popinfC)
print(popinfD)
print(popinfA)
print(rw1, rw2, rw3, rw4, rw5, rw6)
print(rw1[~rw1.mask])
print(rw2[~rw2.mask])
# We have to insert np.nan values for masked values
allFinal = np.zeros((1, 8))
# print popinfA
allFinal[0, popinfA[0]] = np.nan
aF = 0
cardinalFinal = np.zeros((1, 4))
# print popinfC
cardinalFinal[0, popinfC[0]] = np.nan
cF = 0
diagonalFinal = np.zeros((1, 4))
# print popinfD
diagonalFinal[0, popinfD[0]] = np.nan
dF = 0
# print allFinal,cardinalFinal,diagonalFinal
for aFi in range(0, 8):
if ~np.isnan(allFinal[0, aFi]):
allFinal[0, aFi] = allGeodesicDistanceList[aF]
aF = aF + 1
# --------
for cFi in range(0, 4):
if ~np.isnan(cardinalFinal[0, cFi]):
cardinalFinal[0, cFi] = cardinalPixelGeodesicDistanceList[cF]
cF = cF + 1
# --------
for dFi in range(0, 4):
if ~np.isnan(diagonalFinal[0, dFi]):
diagonalFinal[0, dFi] = diagonalPixelGeodesicDistanceList[dF]
dF = dF + 1
# --------
del allGeodesicDistanceList, cardinalPixelGeodesicDistanceList, \
diagonalPixelGeodesicDistanceList
allGeodesicDistanceList = allFinal
cardinalPixelGeodesicDistanceList = cardinalFinal
diagonalPixelGeodesicDistanceList = diagonalFinal
# for cells in horizontal and vertical positions to the
# current cell
cardinalPixelGeodesicDistanceList = channelHeadGeodesicDistance - \
cardinalPixelGeodesicDistanceList
# for cells in the diagonal position to the current cell
diagonalPixelGeodesicDistanceList = (channelHeadGeodesicDistance - \
diagonalPixelGeodesicDistanceList) / np.sqrt(2)
tcL = cardinalPixelGeodesicDistanceList.tolist()
tdL = diagonalPixelGeodesicDistanceList.tolist()
neighborPixelGeodesicDistanceList = np.array(tcL[0] + tdL[0])
# get the index of the maximum geodesic array
chosenGeodesicIndex = np.argmax(neighborPixelGeodesicDistanceList)
# This is required to break out of the while loop
chosenGeodesicDistanceFromAll = np.amin(allGeodesicDistanceList)
neighborPixelSkeletonEndPointList = neighborPixelSkeletonEndPointList[:,
chosenGeodesicIndex]
if chosenGeodesicDistanceFromAll > channelHeadGeodesicDistance:
break
elif np.isnan(chosenGeodesicDistanceFromAll):
print("equal NaN")
break
channelHeadGeodesicDistance = chosenGeodesicDistanceFromAll
# print 'afetr assig:',channelHeadGeodesicDistance
# print channelHeadGeodesicDistance
# Finally add the value of neighborPixelSkeletonEndPointList
# to path list
b = np.array([[neighborPixelSkeletonEndPointList[0]], \
[neighborPixelSkeletonEndPointList[1]]])
# print 'b',b
streamPathPixelList = np.hstack((streamPathPixelList, b))
# print 'streamPathPixelList',streamPathPixelList
# stop
# print streamPathPixelList, streamPathPixelList.shape
return streamPathPixelList
def compute_discrete_geodesic_v1():
    # this is a new version using r.drain to extract discrete geodesics
gisbase = os.environ['GISBASE']
gisdbdir = Parameters.gisdbdir
locationGeonet = 'geonet'
mapsetGeonet = 'geonetuser'
print(gsetup.init(gisbase, gisdbdir, locationGeonet, mapsetGeonet))
# Read the filtered DEM
print('r.in.gdal')
outfilepathgeodesic = Parameters.geonetResultsDir
outfilenamegeodesic = Parameters.demFileName
outfilenamegeodesic = outfilenamegeodesic.split('.')[0] + '_geodesicDistance.tif'
inputgeodesictifile = outfilepathgeodesic + '\\' + outfilenamegeodesic
    print('importing geodesic tif: {}'.format(inputgeodesictifile))
print(g.run_command('r.in.gdal', input=inputgeodesictifile,
output=outfilenamegeodesic, overwrite=True))
# The maximum number of points is 1024
# --- have to add a check---
# -- seems to run for large point shapefiles without fail.
print('importing channel heads shape file')
channeheadsshapefileName = Parameters.pointshapefileName
inputshapefilepath = Parameters.pointFileName
print(g.run_command('v.in.ogr', input=inputshapefilepath,
layer=channeheadsshapefileName, output=channeheadsshapefileName,
geometry='Point'))
print('executing r.drain')
print(g.run_command('r.drain', input=outfilenamegeodesic,
output='discretegeodesicsras',
start_points=channeheadsshapefileName))
    print('thinning the discrete geodesic raster')
print(g.run_command('r.thin', input='discretegeodesicsras',
output='discretegeodesicsrasthin'))
print('converting the raster geodesic to vector map')
print(g.run_command('r.to.vect', input='discretegeodesicsrasthin',
output='discretegeovec', type='line'))
print('exporting the geodesics as shapefile')
print(g.run_command('v.out.ogr', input='discretegeovec',
output=Parameters.drainagelineFileName,
format='ESRI_Shapefile'))
print('completed discrete geodesics')
# ---draining algorithm finished
def Channel_Reconstruct(geodesicPathsCellDic, numberOfEndPoints):
df_channel = pd.DataFrame({'Y': [], 'X': []})
for i in range(0, numberOfEndPoints):
streamPathPixelList = geodesicPathsCellDic[str(i)]
df_tempory = pd.DataFrame(streamPathPixelList.T, columns=['Y', 'X'])
df_channel = pd.concat([df_channel, df_tempory])
size_sr = df_channel.groupby(['Y', 'X']).size()
NewgeodesicPathsCellDic = {}
StartpointList = []
jx = []
jy = []
k = 0
for i in range(0, numberOfEndPoints):
for j in range(0, geodesicPathsCellDic[str(i)][0].size):
if j == 0:
if i != 0:
k += 1
StartpointList.append(
[geodesicPathsCellDic[str(i)][0, j], geodesicPathsCellDic[str(i)][1, j]])
NewgeodesicPathsCellDic[str(k)] = [[geodesicPathsCellDic[str(i)][0, j]],
[geodesicPathsCellDic[str(i)][1, j]]]
else:
if size_sr[
geodesicPathsCellDic[str(i)][0, j], geodesicPathsCellDic[str(i)][1, j]] == \
size_sr[
geodesicPathsCellDic[str(i)][0, j - 1], geodesicPathsCellDic[str(i)][
1, j - 1]]:
NewgeodesicPathsCellDic[str(k)][0].append(geodesicPathsCellDic[str(i)][0, j])
NewgeodesicPathsCellDic[str(k)][1].append(geodesicPathsCellDic[str(i)][1, j])
else:
if [geodesicPathsCellDic[str(i)][0, j],
geodesicPathsCellDic[str(i)][1, j]] not in \
StartpointList:
k += 1
jx.append(geodesicPathsCellDic[str(i)][1, j])
jy.append(geodesicPathsCellDic[str(i)][0, j])
NewgeodesicPathsCellDic[str(k - 1)][0].append(
geodesicPathsCellDic[str(i)][0, j])
NewgeodesicPathsCellDic[str(k - 1)][1].append(
geodesicPathsCellDic[str(i)][1, j])
NewgeodesicPathsCellDic[str(k)] = [[geodesicPathsCellDic[str(i)][0, j]],
[geodesicPathsCellDic[str(i)][1, j]]]
StartpointList.append([geodesicPathsCellDic[str(i)][0, j],
geodesicPathsCellDic[str(i)][1, j]])
else:
NewgeodesicPathsCellDic[str(k)][0].append(
geodesicPathsCellDic[str(i)][0, j])
NewgeodesicPathsCellDic[str(k)][1].append(
geodesicPathsCellDic[str(i)][1, j])
break
NewgeodesicPathsCellList = []
for key in NewgeodesicPathsCellDic.keys():
NewgeodesicPathsCellList.append(np.asarray(NewgeodesicPathsCellDic[key]))
numberOfEndPoints = len(StartpointList)
return NewgeodesicPathsCellDic, numberOfEndPoints, NewgeodesicPathsCellList, jx, jy
def Channel_Definition(xx, yy, geodesicDistanceArray, basinIndexArray, flowDirectionsArray):
# Do compute discrete geodesics
## compute_discrete_geodesic_v1()
print('Computing discrete geodesics')
numberOfEndPoints = len(xx)
geodesicPathsCellDic = {}
reachCodeDirectory = np.zeros((2, numberOfEndPoints))
outerbounds = geodesicDistanceArray.shape
for i in range(0, numberOfEndPoints):
print('EndPoint# ', (i + 1), '/', numberOfEndPoints)
xEndPoint = xx[i]
yEndPoint = yy[i]
skeletonEndPoint = np.array([[yEndPoint], [xEndPoint]])
watershedLabel = basinIndexArray[yEndPoint, xEndPoint]
print('watershedLabel', watershedLabel)
watershedIndexList = basinIndexArray == watershedLabel
geodesicDistanceArrayMask = np.zeros((geodesicDistanceArray.shape))
geodesicDistanceArrayMask[watershedIndexList] = \
geodesicDistanceArray[watershedIndexList]
geodesicDistanceArrayMask[geodesicDistanceArrayMask == 0] = np.Inf
streamPathPixelList = compute_discrete_geodesic(geodesicDistanceArrayMask,
skeletonEndPoint,
defaults.doTrueGradientDescent, i)
geodesicPathsCellDic[str(i)] = streamPathPixelList
# print 'geodesicPathsCellList',geodesicPathsCellList
NewgeodesicPathsCellDic, numberOfEndPoints, geodesicPathsCellList, jx, jy = Channel_Reconstruct(
geodesicPathsCellDic,
numberOfEndPoints)
    df_channel = pd.DataFrame(list(NewgeodesicPathsCellDic.items()), columns=['ID', 'PathCellList'])
df_channel.to_csv(Parameters.streamcellFileName, index=False)
if defaults.doPlot == 1:
pyg_plt.channel_plot(flowDirectionsArray,
geodesicPathsCellList,
xx, yy, 'flowDirectionsArray channel heads and streams')
# Write stream network as shapefiles
pyg_vio.write_drainage_paths(geodesicPathsCellList)
# Write stream junctions as shapefiles
pyg_vio.write_drainage_nodes(jx, jy, 'Junction', Parameters.junctionFileName,
Parameters.junctionshapefileName)
return NewgeodesicPathsCellDic, numberOfEndPoints
| gpl-3.0 |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/IPython/terminal/ipapp.py | 4 | 14215 | #!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~IPython.core.application.Application` object for the command
line :command:`ipython` program.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
import os
import sys
import warnings
from traitlets.config.loader import Config
from traitlets.config.application import boolean_flag, catch_config_error
from IPython.core import release
from IPython.core import usage
from IPython.core.completer import IPCompleter
from IPython.core.crashhandler import CrashHandler
from IPython.core.formatters import PlainTextFormatter
from IPython.core.history import HistoryManager
from IPython.core.application import (
ProfileDir, BaseIPythonApplication, base_flags, base_aliases
)
from IPython.core.magics import (
ScriptMagics, LoggingMagics
)
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.extensions.storemagic import StoreMagics
from .interactiveshell import TerminalInteractiveShell
from IPython.paths import get_ipython_dir
from traitlets import (
Bool, List, default, observe, Type
)
#-----------------------------------------------------------------------------
# Globals, utilities and helpers
#-----------------------------------------------------------------------------
_examples = """
ipython --matplotlib # enable matplotlib integration
ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
ipython --log-level=DEBUG # set logging to DEBUG
ipython --profile=foo # start with profile foo
ipython profile create foo # create profile foo w/ default config files
ipython help profile # show the help for the profile subcmd
ipython locate # print the path to the IPython directory
ipython locate profile foo # print the path to the directory for profile `foo`
"""
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class IPAppCrashHandler(CrashHandler):
"""sys.excepthook for IPython itself, leaves a detailed report on disk."""
def __init__(self, app):
contact_name = release.author
contact_email = release.author_email
bug_tracker = 'https://github.com/ipython/ipython/issues'
super(IPAppCrashHandler,self).__init__(
app, contact_name, contact_email, bug_tracker
)
def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
# Start with parent report
report = [super(IPAppCrashHandler, self).make_report(traceback)]
# Add interactive-specific info we may have
rpt_add = report.append
try:
rpt_add(sec_sep+"History of session input:")
for line in self.app.shell.user_ns['_ih']:
rpt_add(line)
rpt_add('\n*** Last line of input (may not be in above history):\n')
rpt_add(self.app.shell._last_input_line+'\n')
except:
pass
return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags.update(shell_flags)
frontend_flags = {}
addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
'Turn on auto editing of files with syntax errors.',
'Turn off auto editing of files with syntax errors.'
)
addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit",
)
addflag('banner', 'TerminalIPythonApp.display_banner',
"Display a banner upon starting IPython.",
"Don't display a banner upon starting IPython."
)
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
"""Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
"Don't prompt the user when exiting."
)
addflag('term-title', 'TerminalInteractiveShell.term_title',
"Enable auto setting the terminal title.",
"Disable auto setting the terminal title."
)
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
classic_config.InteractiveShell.separate_in = ''
classic_config.InteractiveShell.separate_out = ''
classic_config.InteractiveShell.separate_out2 = ''
classic_config.InteractiveShell.colors = 'NoColor'
classic_config.InteractiveShell.xmode = 'Plain'
frontend_flags['classic']=(
classic_config,
"Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class LocateIPythonApp(BaseIPythonApplication):
description = """print the path to the IPython dir"""
subcommands = dict(
profile=('IPython.core.profileapp.ProfileLocate',
"print the path to an IPython profile directory",
),
)
def start(self):
if self.subapp is not None:
return self.subapp.start()
else:
print(self.ipython_dir)
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
name = u'ipython'
description = usage.cl_usage
crash_handler_class = IPAppCrashHandler
examples = _examples
flags = flags
aliases = aliases
classes = List()
interactive_shell_class = Type(
klass=object, # use default_value otherwise which only allow subclasses.
default_value=TerminalInteractiveShell,
help="Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends"
).tag(config=True)
@default('classes')
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
HistoryManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
LoggingMagics,
StoreMagics,
]
deprecated_subcommands = dict(
qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
),
notebook=('notebook.notebookapp.NotebookApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
),
console=('jupyter_console.app.ZMQTerminalIPythonApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
),
nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
"DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
),
trust=('nbformat.sign.TrustNotebookApp',
"DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
),
kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
"DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
),
)
subcommands = dict(
profile = ("IPython.core.profileapp.ProfileApp",
"Create and manage IPython profiles."
),
kernel = ("ipykernel.kernelapp.IPKernelApp",
"Start a kernel without an attached frontend."
),
locate=('IPython.terminal.ipapp.LocateIPythonApp',
LocateIPythonApp.description
),
history=('IPython.core.historyapp.HistoryApp',
"Manage the IPython history database."
),
)
deprecated_subcommands['install-nbextension'] = (
"notebook.nbextensions.InstallNBExtensionApp",
"DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
)
subcommands.update(deprecated_subcommands)
# *do* autocreate requested profile, but don't create the config file.
auto_create=Bool(True)
# configurables
quick = Bool(False,
help="""Start IPython quickly by skipping the loading of config files."""
).tag(config=True)
@observe('quick')
def _quick_changed(self, change):
if change['new']:
self.load_config_file = lambda *a, **kw: None
display_banner = Bool(True,
help="Whether to display a banner upon starting IPython."
).tag(config=True)
# if there is code of files to run from the cmd line, don't interact
# unless the --i flag (App.force_interact) is true.
force_interact = Bool(False,
help="""If a command or file is given via the command-line,
e.g. 'ipython foo.py', start an interactive shell after executing the
file or command."""
).tag(config=True)
@observe('force_interact')
def _force_interact_changed(self, change):
if change['new']:
self.interact = True
@observe('file_to_run', 'code_to_run', 'module_to_run')
def _file_to_run_changed(self, change):
new = change['new']
if new:
self.something_to_run = True
if new and not self.force_interact:
self.interact = False
# internal, not-configurable
something_to_run=Bool(False)
def parse_command_line(self, argv=None):
"""override to allow old '-pylab' flag with deprecation warning"""
argv = sys.argv[1:] if argv is None else argv
if '-pylab' in argv:
# deprecated `-pylab` given,
# warn and transform into current syntax
argv = argv[:] # copy, don't clobber
idx = argv.index('-pylab')
warnings.warn("`-pylab` flag has been deprecated.\n"
" Use `--matplotlib <backend>` and import pylab manually.")
argv[idx] = '--pylab'
return super(TerminalIPythonApp, self).parse_command_line(argv)
@catch_config_error
def initialize(self, argv=None):
"""Do actions after construct, but before starting the app."""
super(TerminalIPythonApp, self).initialize(argv)
if self.subapp is not None:
# don't bother initializing further, starting subapp
return
# print self.extra_args
if self.extra_args and not self.something_to_run:
self.file_to_run = self.extra_args[0]
self.init_path()
# create the shell
self.init_shell()
# and draw the banner
self.init_banner()
# Now a variety of things that happen after the banner is printed.
self.init_gui_pylab()
self.init_extensions()
self.init_code()
def init_shell(self):
"""initialize the InteractiveShell instance"""
# Create an InteractiveShell instance.
# shell.display_banner should always be False for the terminal
# based app, because we call shell.show_banner() by hand below
# so the banner shows *before* all extension loading stuff.
self.shell = self.interactive_shell_class.instance(parent=self,
profile_dir=self.profile_dir,
ipython_dir=self.ipython_dir, user_ns=self.user_ns)
self.shell.configurables.append(self)
def init_banner(self):
"""optionally display the banner"""
if self.display_banner and self.interact:
self.shell.show_banner()
# Make sure there is a space below the banner.
if self.log_level <= logging.INFO: print()
def _pylab_changed(self, name, old, new):
"""Replace --pylab='inline' with --pylab='auto'"""
if new == 'inline':
warnings.warn("'inline' not available as pylab backend, "
"using 'auto' instead.")
self.pylab = 'auto'
def start(self):
if self.subapp is not None:
return self.subapp.start()
# perform any prexec steps:
if self.interact:
self.log.debug("Starting IPython's mainloop...")
self.shell.mainloop()
else:
self.log.debug("IPython not interactive...")
if not self.shell.last_execution_succeeded:
sys.exit(1)
def load_default_config(ipython_dir=None):
"""Load the default config file from the default ipython_dir.
This is useful for embedded shells.
"""
if ipython_dir is None:
ipython_dir = get_ipython_dir()
profile_dir = os.path.join(ipython_dir, 'profile_default')
app = TerminalIPythonApp()
app.config_file_paths.append(profile_dir)
app.load_config_file()
return app.config
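# Hedged usage sketch (comments only, not part of this module's API): an
# embedded shell can reuse the configuration loaded by load_default_config,
# e.g.
#
#     from IPython import embed
#     from IPython.terminal.ipapp import load_default_config
#     embed(config=load_default_config())
#
# `embed` accepting a `config` keyword is assumed here; see IPython's
# embedding documentation for the authoritative interface.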
launch_new_instance = TerminalIPythonApp.launch_instance
if __name__ == '__main__':
launch_new_instance()
| mit |
PetaVision/projects | LIFLCA/python/main.py | 2 | 2386 | ###############################
## LCA ANALYSIS
## Dylan Paiton
##
###############################
import os, sys
lib_path = os.path.abspath('/home/ec2-user/workspace/PetaVision/plab')
sys.path.append(lib_path)
import pvAnalysis as pv
import plotWeights as pw
import numpy as np
import matplotlib.pyplot as plt
# Problem 1 setup:
#
# 585 512x512 grayscale images, presented sequentially
# 8x8 image patches, evenly tiled across image
# 1 image per batch
# 256 dictionary elements - 2x overcomplete because of rectification
# L-1 gradient descent
#
# displayPeriod = 40ms
# timeConstantTau = 100
# VThresh = 0.05
# dWMax = 1.0
# File Locations
#output_dir = '/Users/dpaiton/Documents/workspace/LIFLCA/output/LCA/'
output_dir = '/home/ec2-user/mountData/MaskLCA/LCA_OUTPUT/'
l1_layer = 'a2_L1.pvp'
err_layer = 'a1_Residual.pvp'
weights = 'w1_L1_to_Residual.pvp'
#weights = 'checkpoints/Checkpoint23400/L1_to_Residual_W.pvp'
# Open files
l1_activityFile = open(output_dir + l1_layer,'rb')
err_activityFile = open(output_dir + err_layer,'rb')
weightsFile = open(output_dir + weights,'rb')
progressPeriod = 1
startFrame = 0
lastFrame = -1 # -1 for all
skipFrames = 1
# outStruct has fields "time" and "values"
#print('L1:')
#(L1Struct,L1Hdr) = pv.get_pvp_data(l1_activityFile,progressPeriod,lastFrame,startFrame,skipFrames)
# Gar Method
# divide the L2 norm of the residual by the L2 of the input to the
# residual (i.e. the image) to get % error
#TODO: pass param for error method, there are 3 that I know of. Gar method, pSNR, SNR
#print('Err:')
#(errStruct,errHdr) = pv.get_pvp_data(err_activityFile,progressPeriod,lastFrame,startFrame,skipFrames)
#Recon error?
# ABS gives distance from 0.
# Averaging over the 512x512 array gives err per frame
#plt.plot(np.average(np.average(np.abs(err_outStruct["values"]),2),2))
#plt.show()
print('Weights:')
(weightStruct,weightsHdr) = pv.get_pvp_data(weightsFile,progressPeriod,lastFrame,startFrame,skipFrames)
#l1_activityFile.close()
#err_activityFile.close()
weightsFile.close()
i_arbor = 0
i_frame = 1 # index, not actual frame number
margin = 2 #pixels
showPlot = True
savePlot = True
saveName = output_dir+'analysis/'+weights[:-4]+'_'+str(i_frame).zfill(5)+'.png'
weight_mat = pw.plotWeights(weightStruct,i_arbor,i_frame,margin,showPlot,savePlot,saveName)
| epl-1.0 |
Adai0808/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: as they are only based
on cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
arcyfelix/Courses | 17-06-05-Machine-Learning-For-Trading/35_daily_portfolio_value.py | 1 | 1820 | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir = 'data'):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def dates_creator(start_date, end_date):
dates = pd.date_range(start_date, end_date)
return dates
def get_data(symbols, dates):
df = pd.DataFrame(index = dates)
if 'SPY' not in symbols: # adding SPY as the main reference
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol),
index_col = 'Date',
parse_dates = True,
usecols = ['Date', 'Adj Close'],
na_values = ['nan'])
df_temp = df_temp.rename(columns = {'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset = ['SPY'])
return df
def normalize_data(df):
return df / df.iloc[0,:]
def plot(df, title):
ax = df.plot(title = title, fontsize = 12)
ax.set_xlabel('Date')
ax.set_ylabel(title)
plt.show()
def calculate_portfolio_value(normalized_df, init_investment, symbols, allocation_fraction):
stock_investment = init_investment * allocation_fraction
stock_values = normalized_df[symbols] * stock_investment
portfolio_value = stock_values.sum(axis = 1)
plot(portfolio_value, 'Portfolio value')
if __name__ == "__main__":
start = '2013-05-01'
end = '2013-12-31'
symbols = ['SPY', 'AAPL', 'GOOG', 'IBM']
dates = dates_creator(start, end)
df = get_data(symbols, dates)
# Daily portfolio
normalized = normalize_data(df)
allocation_fraction = np.array([0.5, 0.0, 0.0, 0.5])
start_investment = 1e6 # in USD
calculate_portfolio_value(normalized, start_investment, symbols, allocation_fraction) | apache-2.0 |
adamcandy/QGIS-Meshing | extras/shape/checkShapeFile.py | 3 | 3739 | import shapefile
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
from shapely.geometry import *
import sys
import matplotlib.pyplot as pyplot
#make a point shape file
#p1 = Point(0.5,0.5)
poly = Polygon([(0,0),(1,0),(1,1),(0,1),(0,0)])
#print(p1.within(poly))
#check if the number of command line arguments are
#ok
assert len(sys.argv)==4, "Incorrect Number of Arguments passed"
"""
Sets the read and the write file stream according to
the command line arguments given.
The first argument specifies the shape file whose points are to be
checked against the boundary
The second argument specifies the boundary polygon
The third argument specifies the file path to which the
new shape has to be written
"""
readPath = sys.argv[1]
boundaryPath = sys.argv[2]
writePath = sys.argv[3]
#input stream for the given shape
sf = shapefile.Reader(readPath)
#input stream of the boundaries
bounds = shapefile.Reader(boundaryPath)
#checks that there should only be one boundary
#assert len(bounds.shapes())==1, "More than one shape in the boundary. Currently only one shape can be specified as a boundary"
# take the first boundary shape and convert it to a shapely Polygon so that
# .exterior and the within/contains checks below work
boundary = Polygon(bounds.shapes()[0].points)
#assert len(bounds.shapes())==1 , "Invalid number of boundaries. Only one boundary is allowed"
boundaryPoints = list(boundary.exterior.coords)
print("boundaryPoints len = %d" % len(boundaryPoints))
"""
This function checks if the given point is on the boundary.
@param point : specifies the point which has to be checked
@return : returns true iff the point is on the boundary lines
"""
def checkPointOnBoundary(point):
if (point.x,point.y) in boundaryPoints:
return True
points = boundary.exterior.coords
numberOfPoints = len(points)
for i in range(numberOfPoints-1):
line = LineString([(points[i][0],points[i][1]),(points[i+1][0], points[i+1][1])])
if (line.contains(point)):
return True
return False
w = []
i = -1
c = 1
shapes = sf.shapes()
pointslist = [shape.points for shape in shapes]
for shape in shapes:
i += 1
w.append(shapefile.Writer(shapefile.Polygon))
for point in pointslist[i]:
p = Point(point[0],point[1])
if p.within(boundary) or checkPointOnBoundary(p):
w[i].point(p.x,p.y)
w[i].field('%d_FLD' % c)
c +=1
"""
if len(w[i].shapes())>=2:
w[i].save(writePath)
"""
print("Number of shapes = %d \n\n" % len(w))
n = 0
for shape in w:
	print("shape %d contains %d points" %(n+1, len(shape.shapes())))
	n += 1
for wr in w:
	for s in wr.shapes():
		x=[];y=[]
		for p in s.points:
			print(p)
			# pyshp stores each point as an [x, y] pair
			x.append(p[0])
			y.append(p[1])
		pyplot.plot(x,y)
pyplot.xlim(-5, 5)
pyplot.ylim(-5, 5)
pyplot.show()
| lgpl-2.1 |
terkkila/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
pkruskal/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
  Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
dilawar/moose-full | moose-core/python/moose/recording.py | 2 | 4617 | from . import moose as _moose
_tick = 8
_base = '/_utils'
_path = _base + '/y{0}'
_counter = 0
_plots = []
_moose.Neutral( _base )
_defaultFields = {
_moose.Compartment : 'Vm',
_moose.ZombieCompartment : 'Vm',
_moose.HHChannel: 'Gk',
_moose.ZombieHHChannel: 'Gk',
_moose.HHChannel2D: 'Gk',
_moose.SynChan: 'Gk',
_moose.CaConc: 'Ca',
_moose.ZombieCaConc: 'Ca',
_moose.Pool: 'conc',
_moose.ZombiePool: 'conc',
_moose.ZPool: 'conc',
_moose.BufPool: 'conc',
_moose.ZombieBufPool: 'conc',
_moose.ZBufPool: 'conc',
_moose.FuncPool: 'conc',
_moose.ZombieFuncPool: 'conc',
_moose.ZFuncPool: 'conc',
}
def _defaultField( obj ):
return _defaultFields[ type( obj ) ]
def setDt( dt ):
'''-----------
Description
-----------
Sets time-step for recording values.
---------
Arguments
---------
dt: Time-step for recording values.
-------
Returns
-------
Nothing.'''
_moose.setClock( _tick, dt )
class SetupError( Exception ):
pass
def _time( npoints = None ):
import numpy
if npoints is None:
try:
npoints = len( _plots[ 0 ].vec )
except IndexError:
raise SetupError(
'List of time-points cannot be constructed because '
'no plots have been set up yet.'
)
begin = 0.0
end = _moose.Clock( '/clock' ).currentTime
return numpy.linspace( begin, end, npoints )
class _Plot( _moose.Table ):
def __init__( self, path, obj, field, label ):
_moose.Table.__init__( self, path )
self._table = _moose.Table( path )
self.obj = obj
self.field = field
self.label = label
@property
def values( self ):
return self._table.vec
@property
def size( self ):
return len( self.values )
@property
def time( self ):
return _time( self.size )
def __iter__( self ):
return iter( self.values )
def record( obj, field = None, label = None ):
'''
'''
global _counter
# Checking if object is an iterable like list or a tuple, but not a string.
if hasattr( obj, '__iter__' ):
return [ record( o, field, label ) for o in obj ]
if isinstance( obj, str ):
obj = _moose.element( obj )
if field is None:
field = _defaultField( obj )
path = _path.format( _counter )
_counter += 1
p = _Plot( path, obj, field, label )
_plots.append( p )
_moose.connect( p, "requestData", obj, 'get_' + field )
_moose.useClock( _tick, path, "process" )
return p
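# Minimal usage sketch (illustrative only; assumes a MOOSE object exists at the
# hypothetical path '/model/soma' exposing a 'Vm' field):
#
#   setDt(50e-6)
#   vm = record('/model/soma', field='Vm')
#   # ... run the simulation ...
#   show(vm)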
def _label( plot, labelFormat = '{path}.{field}' ):
# Over-ride label format if label has been given explicitly.
if plot.label:
labelFormat = plot.label
return labelFormat.format(
path = plot.obj.path,
name = plot.obj.name,
field = plot.field
)
def _selectedPlots( selected ):
if selected is None:
# Returning a copy of this list, instead of reference. The returned
# list will be manipulated later.
return _plots[ : ]
elif isinstance( selected, _Plot ):
return [ selected ]
else:
return selected
def saveCSV(
fileName,
selected = None,
delimiter = '\t',
header = True,
headerCommentCharacter = '#',
labelFormat = '{path}.{field}',
timeCol = True,
timeHeader = 'Time',
fileMode = 'w' ):
'''
'''
import csv
plots = _selectedPlots( selected )
if header:
header = []
if timeCol:
header.append( timeHeader )
for plot in plots:
header.append( _label( plot, labelFormat ) )
header[ 0 ] = headerCommentCharacter + header[ 0 ]
if timeCol:
plots.insert( 0, _time() )
with open( fileName, fileMode ) as fout:
writer = csv.writer( fout, delimiter = delimiter )
if header:
writer.writerow( header )
writer.writerows( list(zip( *plots )) )
def saveXPLOT(
fileName,
selected = None,
labelFormat = '{path}.{field}',
fileMode = 'w' ):
'''
'''
plots = _selectedPlots( selected )
with open( fileName, fileMode ) as fout:
write = lambda line: fout.write( line + '\n' )
for ( i, plot ) in enumerate( plots ):
label = '/plotname ' + _label( plot, labelFormat )
if i > 0:
write( '' )
write( '/newplot' )
write( label )
for value in plot:
write( str( value ) )
def show(
selected = None,
combine = True,
labelFormat = '{path}.{field}',
xLabel = 'Time (s)',
yLabel = '{field}' ):
'''
'''
try:
from matplotlib import pyplot as plt
except ImportError:
print("Warning: recording.show(): Cannot find 'matplotlib'. Not showing plots.")
return
plots = _selectedPlots( selected )
if combine:
plt.figure()
for plot in plots:
if not combine:
plt.figure()
print(_label( plot ))
plt.plot( plot.time, plot.values, label = _label( plot ) )
plt.legend()
plt.show()
def HDF5():
pass
| gpl-2.0 |
taimir/infogan-keras | model_test.py | 1 | 8240 | import sys
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
def get_session(gpu_fraction=0.8):
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=True)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
KTF.set_session(get_session())
import numpy as np
from scipy.stats import mode
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from sklearn import svm
from sklearn.decomposition import PCA
from learn.models import InfoGAN
from learn.stats.distributions import Categorical, IsotropicGaussian, Bernoulli
from learn.utils.visualization import ROCView, micro_macro_roc, cluster_silhouette_view
batch_size = 256
n_classes = 10
def run_c1_only(roc_view, model, x_test, y_test, experiment_id):
# encodings: list of arrays with shape (N, salient_dim)
encodings_list = model.encode(x_test)
# check performance only based on c1 classification
c1 = [e for e in encodings_list if e.shape[1] == 10][0]
c1 = np.argmax(c1, axis=1)
    # TODO: if the model is not too good, in some cases the class
# coverage might not be complete
c1_map = np.zeros_like(y_test)
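    # For each true digit, find its most common categorical code and assign that
    # digit to all samples carrying the code (majority-vote cluster-to-class map).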
for digit in range(10):
digit_map = mode(c1[y_test == digit])[0][0]
c1_map[c1 == digit_map] = digit
acc = sum(y_test == c1_map) / len(y_test)
print("Class. accuracy based on c1 (categorical latent): {}".format(acc))
res = micro_macro_roc(n_classes,
y_expected=to_categorical(y_test, num_classes=n_classes),
y_predicted=to_categorical(c1_map, num_classes=n_classes))
micro_fpr, micro_tpr = res['micro']
roc_view.add_curve(micro_fpr, micro_tpr, "infogan c1 only, micro")
macro_fpr, macro_tpr = res['macro']
roc_view.add_curve(macro_fpr, macro_tpr, "infogan c1 only, macro")
def run_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id):
# check the performance based on the original images with an SVM
# training only on 5 % of the training data, simulating a semi-supervised scenario
x_train = x_train.reshape((-1, 784))[:2500]
y_train = y_train[:2500]
x_test = x_test.reshape((-1, 784))
classifier = svm.SVC()
classifier.fit(x_train, y_train)
test_preds = classifier.predict(x_test)
acc = sum(y_test == test_preds) / len(y_test)
print("Class. accuracy based on original representation: {}".format(acc))
res = micro_macro_roc(n_classes,
y_expected=to_categorical(y_test, num_classes=n_classes),
y_predicted=to_categorical(test_preds, num_classes=n_classes))
micro_fpr, micro_tpr = res['micro']
roc_view.add_curve(micro_fpr, micro_tpr, "original SVM, micro")
macro_fpr, macro_tpr = res['macro']
roc_view.add_curve(macro_fpr, macro_tpr, "original SVM, macro")
def run_pca_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id, n_pca=12):
x_train = x_train.reshape((-1, 784))[:2500]
y_train = y_train[:2500]
x_test = x_test.reshape((-1, 784))
pca = PCA(n_components=n_pca)
pca.fit(x_train)
# check the performance based on n_pca PCA features with an SVM
test_encodings = pca.transform(x_test)
train_encodings = pca.transform(x_train)
classifier = svm.SVC()
classifier.fit(train_encodings, y_train)
test_preds = classifier.predict(test_encodings)
acc = sum(y_test == test_preds) / len(y_test)
print("Class. accuracy based on PCA latents: {}".format(acc))
res = micro_macro_roc(n_classes,
y_expected=to_categorical(y_test, num_classes=n_classes),
y_predicted=to_categorical(test_preds, num_classes=n_classes))
micro_fpr, micro_tpr = res['micro']
roc_view.add_curve(micro_fpr, micro_tpr, "pca latent, micro")
macro_fpr, macro_tpr = res['macro']
roc_view.add_curve(macro_fpr, macro_tpr, "pca latent, macro")
def run_infogan_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id):
# check the performance based on all infogan features with an SVM
test_encodings = np.concatenate(model.encode(x_test[:2500]), axis=1)
y_test = y_test[:2500]
train_encodings = np.concatenate(model.encode(x_train), axis=1)
classifier = svm.SVC()
classifier.fit(train_encodings, y_train)
test_preds = classifier.predict(test_encodings)
acc = sum(y_test == test_preds) / len(y_test)
print("Class. accuracy based on InfoGAN latents: {}".format(acc))
res = micro_macro_roc(n_classes,
y_expected=to_categorical(y_test, num_classes=n_classes),
y_predicted=to_categorical(test_preds, num_classes=n_classes))
micro_fpr, micro_tpr = res['micro']
roc_view.add_curve(micro_fpr, micro_tpr, "infogan latent, micro")
macro_fpr, macro_tpr = res['macro']
roc_view.add_curve(macro_fpr, macro_tpr, "infogan latent, macro")
def run_cluster_evaluation(model, x_test, y_test, experiment_id):
test_encodings = np.concatenate(model.encode(x_test), axis=1)
# produce a clustering evaluation
cluster_silhouette_view(test_encodings, y_test,
os.path.join(experiment_id, "silhouette_score.png"),
n_clusters=n_classes)
def test_mnist_performance(model, x_test, y_test, x_train, y_train, experiment_id):
roc_view = ROCView()
run_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id)
run_pca_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id)
run_infogan_svm(roc_view, model, x_train, y_train, x_test, y_test, experiment_id)
run_cluster_evaluation(model, x_test, y_test, experiment_id)
roc_view.save_and_close(os.path.join(experiment_id, "ROC.png"))
if __name__ == "__main__":
experiment_id = sys.argv[1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape((-1, 28, 28, 1)) / 255
x_test = x_test.reshape((-1, 28, 28, 1)) / 255
x_val = x_train[:1000]
y_val = y_train[:1000]
x_train = x_train[1000:]
y_train = y_train[1000:]
datagen = ImageDataGenerator(data_format='channels_last')
datagen.fit(x_train)
def data_generator():
return datagen.flow(x_train, batch_size=batch_size)
meaningful_dists = {'c1': Categorical(n_classes=10),
'c2': IsotropicGaussian(dim=1),
'c3': IsotropicGaussian(dim=1)
}
noise_dists = {'z': IsotropicGaussian(dim=62)}
image_dist = Bernoulli()
prior_params = {'c1': {'p_vals': np.ones((batch_size, 10), dtype=np.float32) / 10},
'c2': {'mean': np.zeros((batch_size, 1), dtype=np.float32),
'std': np.ones((batch_size, 1), dtype=np.float32)},
'c3': {'mean': np.zeros((batch_size, 1), dtype=np.float32),
'std': np.ones((batch_size, 1), dtype=np.float32)},
'z': {'mean': np.zeros((batch_size, 62), dtype=np.float32),
'std': np.ones((batch_size, 62), dtype=np.float32)}
}
model = InfoGAN(batch_size=batch_size,
image_shape=(28, 28, 1),
noise_dists=noise_dists,
meaningful_dists=meaningful_dists,
image_dist=image_dist,
prior_params=prior_params,
supervised_dist_name="c1")
gen_weights_filepath = os.path.join(experiment_id, "gen_train_model.hdf5")
disc_weights_filepath = os.path.join(experiment_id, "disc_train_model.hdf5")
model.load_weights(gen_weights_filepath, disc_weights_filepath)
test_mnist_performance(model, x_test, y_test, x_train, y_train, experiment_id)
KTF.get_session().close()
| mit |
nan86150/ImageFusion | lib/python2.7/site-packages/scipy/signal/waveforms.py | 17 | 14814 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `t` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| mit |
pcm17/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
femtotrader/pandas_talib | pandas_talib/__init__.py | 1 | 15199 | '''
Created on April 15, 2012
Last update on July 18, 2015
@author: Bruno Franca
@author: Peter Bakker
@author: Femto Trader
'''
import pandas as pd
import numpy as np
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(df[price].rolling(n).mean(), name=name)
return out(SETTINGS, df, result)
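# Usage sketch (illustrative only): given a DataFrame `df` with a 'Close'
# column, MA(df, 20) returns df with an extra 'MA_20' column when
# SETTINGS.join is True, otherwise just the 'MA_20' Series.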
def emaHelper(price, n, alphaIn=None):
"""
Algorithm by Stockchart
"""
length_of_df = len(price.axes[0])
initial_sma = price[0:n].mean()
ema = pd.Series(np.nan, index=range(0, length_of_df))
ema.iat[n-1] = initial_sma
if(not alphaIn):
alpha = (2.0/(n + 1.0))
else:
alpha = alphaIn
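    # Recursive EMA update: ema[i] = alpha * price[i] + (1 - alpha) * ema[i-1],
    # seeded at index n-1 with the simple moving average of the first n prices.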
for i in range(n, length_of_df):
ema.iat[i] = price.iat[i]* alpha + (1-alpha)* ema.iat[i-1]
return ema
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result = emaHelper(df[price], n)
return out(SETTINGS, df, result)
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
L = len(df['High'])
TR_l = [None]*L
for i in range(1, L):
TR = max(df['High'].iloc[i] - df['Low'].iloc[i], \
abs(df['High'].iloc[i] - df['Close'].iloc[i-1]), \
abs(df['Low'].iloc[i] - df['Close'].iloc[i-1]) )
TR_l[i] = TR
TR_s = pd.Series(TR_l[1::])
alpha = 1.0/n
result = emaHelper(TR_s, n, alpha)
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(df[price].rolling(n).mean())
MSD = pd.Series(df[price].rolling(n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(SOk.ewm(span=n, min_periods=n - 1).mean(), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = df[key].rolling(timeperiod, min_periods=timeperiod).mean()
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = df['Close'].ewm(span=n, min_periods=n - 1).mean()
EX2 = EX1.ewm(span=n, min_periods=n - 1).mean()
EX3 = EX2.ewm(span=n, min_periods=n - 1).mean()
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
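    # Accumulate the raw upward (+DM) and downward (-DM) directional movement
    # for each bar; these are smoothed into the +DI/-DI series further below.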
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('High')]
DoMove = df.iat[i, df.columns.get_loc('Low')] - df.iat[i + 1, df.columns.get_loc('Low')]
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.iat[i + 1, df.columns.get_loc('High')], df.iat[i, df.columns.get_loc('Close')]) - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n - 1).mean() / ATR)
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n - 1).mean() / ATR)
temp = abs(PosDI - NegDI) / (PosDI + NegDI)
result = pd.Series(temp.ewm(span=n_ADX, min_periods=n_ADX - 1).mean(), name='ADX_' + str(n) + '_' + str(n_ADX))
return out(SETTINGS, df, result)
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(df[price].ewm(span=n_fast, min_periods=n_slow - 1).mean())
EMAslow = pd.Series(df[price].ewm(span=n_slow, min_periods=n_slow - 1).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=8).mean(), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = Range.ewm(span=9, min_periods=8).mean()
EX2 = EX1.ewm(span=9, min_periods=8).mean()
Mass = EX1 / EX2
result = pd.Series(Mass.rolling(25).sum(), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.iat[i + 1, df.columns.get_loc('High')], df.iat[i, df.columns.get_loc('Close')]) - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('Low')]) - abs(df.iat[i + 1, df.columns.get_loc('Low')] - df.iat[i, df.columns.get_loc('High')])
VM.append(Range)
i = i + 1
result = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iat[i + 1, df.columns.get_loc('High')] - df.iat[i, df.columns.get_loc('High')]
DoMove = df.iat[i, df.columns.get_loc('Low')] - df.iat[i + 1, df.columns.get_loc('Low')]
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n - 1).mean())
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n - 1).mean())
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(M.ewm(span=r, min_periods=r - 1).mean())
aEMA1 = pd.Series(aM.ewm(span=r, min_periods=r - 1).mean())
EMA2 = pd.Series(EMA1.ewm(span=s, min_periods=s - 1).mean())
aEMA2 = pd.Series(aEMA1.ewm(span=s, min_periods=s - 1).mean())
result = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
return out(SETTINGS, df, result)
def ACCDIST(df, n):
"""
Accumulation/Distribution
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
result = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
return out(SETTINGS, df, result)
def Chaikin(df):
"""
Chaikin Oscillator
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
result = pd.Series(ad.ewm(span=3, min_periods=2).mean() - ad.ewm(span=10, min_periods=9).mean(), name='Chaikin')
return out(SETTINGS, df, result)
def MFI(df, n):
"""
Money Flow Index and Ratio
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < len(df) - 1: # df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.iat[i + 1, df.columns.get_loc('Volume')])
else:
PosMF.append(0)
        i = i + 1

PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
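    # note: PosMF is rebuilt with a default integer index, so the division below assumes df also
    # uses a default RangeIndex; with a DatetimeIndex the two series would not align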
MFR = pd.Series(PosMF / TotMF)
result = pd.Series(MFR.rolling(n).mean(), name='MFI_' + str(n))
return out(SETTINGS, df, result)
def OBV(df, n):
"""
On-balance Volume
"""
i = 0
OBV = [0]
while i < len(df) - 1: # df.index[-1]:
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] > 0:
OBV.append(df.iat[i + 1, df.columns.get_loc('Volume')])
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] == 0:
OBV.append(0)
if df.iat[i + 1, df.columns.get_loc('Close')] - df.iat[i, df.columns.get_loc('Close')] < 0:
OBV.append(-df.iat[i + 1, df.columns.get_loc('Volume')])
i = i + 1
OBV = pd.Series(OBV)
result = pd.Series(OBV.rolling(n).mean(), name='OBV_' + str(n))
return out(SETTINGS, df, result)
def FORCE(df, n):
"""
Force Index
"""
result = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
return out(SETTINGS, df, result)
def EOM(df, n):
"""
Ease of Movement
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
result = pd.Series(EoM.rolling(n).mean(), name='EoM_' + str(n))
return out(SETTINGS, df, result)
def CCI(df, n):
"""
Commodity Channel Index
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
result = pd.Series((PP - PP.rolling(n).mean()) / PP.rolling(n).std(), name='CCI_' + str(n))
return out(SETTINGS, df, result)
def COPP(df, n):
"""
Coppock Curve
"""
M = df['Close'].diff(int(n * 11 / 10) - 1)
N = df['Close'].shift(int(n * 11 / 10) - 1)
ROC1 = M / N
M = df['Close'].diff(int(n * 14 / 10) - 1)
N = df['Close'].shift(int(n * 14 / 10) - 1)
ROC2 = M / N
temp = ROC1 + ROC2
result = pd.Series(temp.ewm(span=n, min_periods=n).mean(), name='Copp_' + str(n))
return out(SETTINGS, df, result)
def KELCH(df, n):
"""
Keltner Channel
"""
temp = (df['High'] + df['Low'] + df['Close']) / 3
KelChM = pd.Series(temp.rolling(n).mean(), name='KelChM_' + str(n))
temp = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
KelChU = pd.Series(temp.rolling(n).mean(), name='KelChU_' + str(n))
temp = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
KelChD = pd.Series(temp.rolling(n).mean(), name='KelChD_' + str(n))
result = pd.DataFrame([KelChM, KelChU, KelChD]).transpose()
return out(SETTINGS, df, result)
def ULTOSC(df):
"""
Ultimate Oscillator
"""
i = 0
TR_l = [0]
BP_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.iat[i + 1, df.columns.get_loc('High')], df.iat[i, df.columns.get_loc('Close')]) - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
TR_l.append(TR)
BP = df.iat[i + 1, df.columns.get_loc('Close')] - min(df.iat[i + 1, df.columns.get_loc('Low')], df.iat[i, df.columns.get_loc('Close')])
BP_l.append(BP)
i = i + 1
result = pd.Series((4 * pd.Series(BP_l).rolling(7).sum() / pd.Series(TR_l).rolling(7).sum()) + (2 * pd.Series(BP_l).rolling(14).sum() / pd.Series(TR_l).rolling(14).sum()) + (pd.Series(BP_l).rolling(28).sum() / pd.Series(TR_l).rolling(28).sum()), name='Ultimate_Osc')
return out(SETTINGS, df, result)
def DONCH(df, n):
"""
Donchian Channel
"""
i = 0
DC_l = []
while i < n - 1:
DC_l.append(0)
i = i + 1
i = 0
while i + n - 1 < len(df) - 1: # df.index[-1]:
        DC = max(df['High'].iloc[i:i + n - 1]) - min(df['Low'].iloc[i:i + n - 1])
DC_l.append(DC)
i = i + 1
DonCh = pd.Series(DC_l, name='Donchian_' + str(n))
result = DonCh.shift(n - 1)
return out(SETTINGS, df, result)
def STDDEV(df, n):
"""
Standard Deviation
"""
result = pd.Series(df['Close'].rolling(n).std(), name='STD_' + str(n))
return out(SETTINGS, df, result)
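# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes `pd` is imported at the top of this module and that out()/SETTINGS
# simply return or join the indicator result, as the functions above suggest.
# The random-walk OHLCV frame below is made up purely to exercise the indicators.
if __name__ == "__main__":
    import numpy as np
    idx = pd.date_range("2020-01-01", periods=300, freq="D")
    close = 100.0 + np.cumsum(np.random.randn(300))
    spread = np.abs(np.random.randn(300))
    demo = pd.DataFrame({"Open": close, "High": close + spread,
                         "Low": close - spread, "Close": close,
                         "Volume": np.random.randint(1000, 5000, 300).astype(float)},
                        index=idx)
    print(MACD(demo, 12, 26).tail())
    print(STDDEV(demo, 20).tail())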
| mit |
aflaxman/scikit-learn | sklearn/linear_model/setup.py | 83 | 1719 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.pyx'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.pyx'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ngoix/OCRF | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
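# Note (added): `threshold` is the radius limit used when merging a sample into an existing
# subcluster, so it controls how strongly the 100,000 samples are condensed before any global
# clustering step; a larger threshold would leave fewer subclusters than the ~158 quoted above.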
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
cpicanco/player_plugins | self_contained/colormaps.py | 1 | 51749 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2016 Rafael Picanço.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
# http://bids.github.io/colormap/
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
('inferno', _inferno_data),
('plasma', _plasma_data),
('viridis', _viridis_data)):
cmaps[name] = ListedColormap(data, name=name)
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
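# Usage note (added for illustration): these ListedColormap objects can be passed directly to
# matplotlib, e.g. plt.imshow(data, cmap=viridis); the OpenCV demo below instead builds an
# explicit per-channel lookup table from _viridis_data.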
if __name__ == '__main__':
import numpy as np
import cv2
from glob import glob
import os
# load image file as numpy array
path = '/home/rafael/documents/doutorado/data_doc/003-Natan/2015-05-13/distance_0-695-329'
img_surface = cv2.imread(glob(os.path.join(path,'surface*'))[0],0)
v = np.asarray(_viridis_data)
v *= 255
v = v.astype(np.uint8)
# print v.shape, v.dtype
print img_surface.shape, img_surface.dtype
RLUT = v[:,0]
GLUT = v[:,1]
BLUT = v[:,2]
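    # cv2.LUT maps every 8-bit gray level through the 256-entry colormap table, one channel at
    # a time; OpenCV images are BGR, so the blue column of the table fills channel 0 below.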
img = np.zeros((img_surface.shape[0],img_surface.shape[1],3), np.uint8)
img[:,:,0] = cv2.LUT(img_surface, BLUT)
img[:,:,1] = cv2.LUT(img_surface, GLUT)
img[:,:,2] = cv2.LUT(img_surface, RLUT)
while True:
cv2.imshow("input", img)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
break
cv2.destroyAllWindows() | gpl-3.0 |
JPFrancoia/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
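# Illustrative check (added; not part of the original example): .mahalanobis() returns the
# squared distances d^2 = (x_i - mu)' Sigma^{-1} (x_i - mu) from the docstring above; the two
# numbers printed here should agree to floating-point precision.
x0 = X[:1]
diff = x0 - emp_cov.location_
d2_manual = diff.dot(np.linalg.inv(emp_cov.covariance_)).dot(diff.T).item()
print("manual d^2 = %.6f, EmpiricalCovariance.mahalanobis = %.6f"
      % (d2_manual, emp_cov.mahalanobis(x0)[0]))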
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
tjhei/burnman_old | burnman/composition.py | 1 | 3529 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
#system libs:
import numpy
import scipy.optimize as opt
import scipy.integrate as integrate
import math
import matplotlib.pyplot as pyplot
#own libs:
import geotherm
from tools import *
# TODO: add up weight percent and check <100 and tell them how much
molar_mass = {'Fe':55.845/1000., 'Mg':24.305/1000., 'O':15.999/1000., 'Al':26.982/1000., 'Ca':40.078/1000., 'Si':28.085/1000.} # kg/mol
Av = 6.022141e23 # Avogadro constant in 1/mol
boltzmann_constant = 1.3806503e-23 # in m^2 kg s^-2 K^-1
gas_constant = Av * boltzmann_constant # in J mol^-1 K^-1
lower_mantle_mass = 4.043e24*.75 # in kg
# convert weight percentage (amount, 1.00 = 100%) of a given element to molar mass
def weight_pct_to_mol(element, amount):
return amount * lower_mantle_mass / molar_mass[element] * Av
def calculate_phase_percents(inp):
"""
Converts given weight percentages into the requisite percent of each phase
in mols and also returns the fraction of perovskite versus ferropericlase,
    assuming all of the silicon goes into the perovskite phase
and with any remaining Fe or Mg going into the oxide phase.
Input:
inp={'Mg': ..., 'Fe': ..., ...} # in weight percent
Returns:
phase_per={'fp': ..., 'pv': ...} # as a fraction
rel_mol_per={'MgO: ..., 'FeO': ..., ...} # in mols
"""
names = {'Mg':'MgO','Fe':'FeO','Si':'SiO2', 'Ca':'Ca', 'Al':'Al'}
rel_mol_per = {}
out = {}
for a in inp:
out[names[a]] = weight_pct_to_mol(a,inp[a])
norm = out['MgO']+out['FeO']
for a in inp:
rel_mol_per[names[a]] = out[names[a]]/norm
frac_mol_SiO2 = rel_mol_per['SiO2']
phase_per={'fp':(1.-frac_mol_SiO2),'pv':frac_mol_SiO2}
return phase_per,rel_mol_per
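# Hedged usage sketch (added for illustration; numbers not checked against the paper):
#   phase_per, rel_mol_per = calculate_phase_percents({'Mg': 0.213, 'Fe': 0.0626,
#                                                      'Si': 0.242, 'Ca': 0., 'Al': 0.})
# phase_per gives the molar fractions of ferropericlase ('fp') and perovskite ('pv'),
# and rel_mol_per gives each oxide's mole number normalised to (MgO + FeO),
# assuming all Si enters the perovskite phase.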
def part_coef_calc(inp2,StartP,EndP,deltaP):
    a = [] #partition coefficient of Fe in fp
    b = [] #partition coefficient of Fe in pv
Pressure= []
Temperature=[]
counter = 0
def calculate_partition_coefficient(pressure, temperature, components):
frac_mol_FeO = components['FeO']
frac_mol_MgO = components['MgO']
frac_mol_SiO2 = components['SiO2']
delV = 2.e-7 #in m^3/mol, taken from Nakajima et al 2012, JGR
Kd_0 = .29 #Fig 5 Nakajima et al 2012
rs = ((25.e9-pressure)*(delV)/(gas_constant*temperature))+math.log(Kd_0) #eq 5 Nakajima et al 2012
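    # note (added): with gas_constant in J mol^-1 K^-1 and delV in m^3/mol, `pressure` here is
    # expected in Pa (25.e9 Pa = 25 GPa is the reference pressure of the fit) and `temperature` in K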
    K = math.exp(rs) #The exchange coefficient at P and T
num_to_sqrt = (-4.*frac_mol_FeO*(K-1.)*K*frac_mol_SiO2)+(pow(1.+(frac_mol_FeO*(K-1))+((K-1.)*frac_mol_SiO2),2.))
b = (-1. + frac_mol_FeO - (frac_mol_FeO*K)+frac_mol_SiO2 - (frac_mol_SiO2*K) + math.sqrt(num_to_sqrt)) \
/ (2.*frac_mol_SiO2*(1.-K))
a = b /(((1.-b)*K)+b)
    return a,b #a is partition coefficient array with P for mw, b is pcarray for pv
# test some composition (Javoy 2010, Table 6, PLoM)
if __name__ == "__main__":
inp1 = {'Mg':0.213, 'Fe': 0.0626, 'Si':0.242, 'Ca':0., 'Al':0.} # wt%
inp2 = conv_inputs(inp1)
StartP = 23.83 #in GPa
EndP = 110.0
deltaP = 1.
P,T,a,b,frac_mol_pv,frac_mol_mw = part_coef_calc(inp2,StartP,EndP,deltaP)
gt = lambda p: geotherm.geotherm_brown_shankland(p)
pressure = StartP
temperature = gt(StartP)
calculate_partition_coefficient(pressure, temperature, inp2)
#part_coef_calc(inp2,StartP,EndP,deltaP)
#print inp1
#print inp2
#print t
| gpl-2.0 |
henridwyer/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problem.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
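# Illustrative addition (not in the original example): training-set accuracy for each
# classifier, so the decision surfaces plotted below can also be compared numerically.
for clf, title in zip((svc, lin_svc, rbf_svc, poly_svc), titles):
    print("%s: training accuracy = %.3f" % (title, clf.score(X, y)))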
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
yuliang419/K2tools | lctools.py | 1 | 23315 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from math import floor, ceil
import warnings
from lmfit import minimize, Parameters, report_errors
from PyPDF2 import PdfFileReader, PdfFileWriter
import glob, os
import urllib
import matplotlib.image as mpimg
import model_transits
def sechmod(t, b, t0, w):
"""Fits a sech model to a transit as a faster, simpler alternative to the Mandel-Agol model.
INPUTS:
t - nd array of light curve times (days)
b - 2*transit depth, defined as negative
t0 - mid-transit time
w - width of transit (days)
RETURNS:
nd array of model fluxes
"""
warnings.simplefilter('ignore', RuntimeWarning)
return 1 + b / (np.exp(-(t - t0) ** 2. / w ** 2.) + np.exp((t - t0) ** 2. / w ** 2.))
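# Illustrative check of the depth convention above (not part of the original
# module; safe to remove). At t == t0 both exponentials equal 1, so sechmod
# returns 1 + b/2 -- i.e. b is twice the (negative) transit depth.
_example_flux = sechmod(np.array([0.5]), -0.02, 0.5, 0.1)
assert abs(_example_flux[0] - 0.99) < 1e-12  # 1 + (-0.02)/2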
def rect_sechmod(t, b, t0, w, a0, a1):
"""Fits a sech model with linear detrending of background.
INPUTS: see sechmod
a0, a1 - coefficients of linear detrending function. The background is modelled as a0 + a1*t
RETURNS: see sechmod
"""
warnings.simplefilter('ignore', RuntimeWarning)
return (1 + b / (np.exp(-(t - t0) ** 2. / w ** 2.) + np.exp((t - t0) ** 2. / w ** 2.))) * (a0 + a1 * t)
def residual(params, t, data, period=1, sech=True):
"""Residual function for fitting for midtransit times.
INPUTS:
params - lmfit.Parameters() object containing parameters to be fitted.
t - nd array of light curve times (days).
data - nd array of normalized light curve fluxes. Median out-of-transit flux should be set to 1.
period - period of transit (days).
sech - boolean. If True, will use sech model. Otherwise will fit Mandel-Agol model instead.
The params argument must match the format of the model chosen.
RETURNS:
res - residual of data - model, to be used in lmfit.
"""
if sech:
vals = params.valuesdict()
tc = vals['tc']
b = vals['b']
w = vals['w']
a0 = vals['a0']
a1 = vals['a1']
model = rect_sechmod(t, b, tc, w, a0, a1)
else:
vals = params.valuesdict()
tc = vals['tc']
b = vals['b']
r_a = vals['Rs_a']
Rp_Rs = vals['Rp_Rs']
F = vals['F']
gamma1 = vals['gamma1']
gamma2 = vals['gamma2']
a0 = vals['a0']
a1 = vals['a1']
model = model_transits.modeltransit([tc, b, r_a, Rp_Rs, F, gamma1, gamma2], model_transits.occultquad, period,
t)
model *= (a0 + a1 * t)
return data - model
def read_lc(name, p, t0, path='.'):
"""Read in data from file. If flux is not normalized, normalize first by setting mean of out-of-transit portion to 1.
Creates plot handle for full light curve plot.
INPUTS:
name - name of light curve file to be read. Assuming the target file is named "[name].txt".
p - best guess of transit period from BLS (days).
t0 - best guess of transit ephemeris from BLS (JD).
path - path of directory containing transit file.
RETURNS:
t - nd array of light curve times, with 3-sigma upward outliers in flux removed.
f - nd array of light curve fluxes, with 3-sigma upward outliers in flux removed.
newphase - nd array of light curve phases.
w - estimated transit duration, expressed as fraction of transit period.
depth - estimated transit depth (defined as negative).
fig - plot handle for full light curve plot.
"""
target = path + '/' + name + '.txt'
t, f = np.loadtxt(target, unpack=True, usecols=(0, 1))
fig = plt.figure(figsize=(12, 4))
plt.plot(t, f, lw=0, marker='.')
plt.xlabel('Time (days)')
plt.ylabel('Relative flux')
plt.ylim(min(f), max(f))
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
# plt.savefig('outputs/' + name + '_full_lc.pdf', dpi=150, bbox_inches='tight')
# sort by phase to plot folded light curve (may not be necessary for 1-pager)
t -= t0
ph = t / p - np.around(t / p)
order = sorted(range(len(ph)), key=lambda k: ph[k])
phase = ph[order]
f_sorted = f[order]
t_sorted = t[order]
p0 = [min(f_sorted) - 1., 0., 0.005]
# fit sech model to folded light curve to get params
popt, pcov = curve_fit(sechmod, phase, f_sorted, p0=p0)
fmod = sechmod(phase, *popt)
newphase = popt[1]
w = popt[2]
depth = popt[0] / 2.
res = f_sorted - fmod
sigma = np.std(res)
    print 'residual scatter about sech fit:', sigma
# clip upward outliers
good = np.where(f_sorted < 1 + 3 * sigma)
f_sorted = f_sorted[good]
t_sorted = t_sorted[good]
order = sorted(range(len(t_sorted)), key=lambda k: t_sorted[k])
t = t_sorted[order] + t0
f = f_sorted[order]
return t, f, newphase, w, depth, fig
def plot_indiv_trans(name, t, f, p, t0, window, p0, plotbad=True, plots=True, sech=True):
""" Plot individual transits with a choice of sech or Mandel-Agol fit.
INPUTS:
t - nd array of light curve times.
f - nd array of normalized light curve fluxes.
window - approximate length of transit window (days). Include at least half a transit's worth of out-of-transit
light curve on either side of dip.
p0 - best guess of fit parameters. If sech, p0 = [w0, depth]. w0 is fractional width of transit from read_lc.
If Mandel-Agol, p0 = [b,Rs_a,Rp_Rs,gamma1,gamma2].
plotbad - set to True if you want to plot incomplete or misshapen transits along with good ones.
sech - set to True if you want a sech fit. Otherwise use Mandel-Agol model.
RETURNS:
dt_all - nd array showing the time (days) to the nearest transit for each point.
epochs - nd array of epoch number of each point.
midpts - nd array of midtransit times associated with all points.
err - array of errors on each midtransit time.
"""
end = int(floor((t[-1] - t0) / p) + 1)
start = int(floor((t[0] - t0) / p))
cnt = 0
epochs = []
dt_all = []
midpts = []
err = []
valid_trans = []
params = Parameters()
if sech:
w0 = p0[0]
depth = p0[1]
params.add('tc', value=0, vary=True, min=-0.1, max=0.1)
params.add('b', value=depth * 2, vary=False)
params.add('w', value=w0 * p, vary=False)
params.add('a0', value=1)
params.add('a1', value=0)
else:
depth = -p0[2] ** 2
params.add('tc', value=0, vary=True, min=-0.1, max=0.1)
params.add('b', value=p0[0], vary=False)
params.add('Rs_a', value=p0[1], vary=False)
params.add('Rp_Rs', value=p0[2], vary=False)
params.add('F', value=1, vary=False)
params.add('gamma1', value=p0[3], vary=False) # should I let these float?
params.add('gamma2', value=p0[4], vary=False)
params.add('a0', value=1, vary=True)
params.add('a1', value=0, vary=True)
for i in range(start, end):
print 'Transit number ' + str(cnt)
midt = i * p + t0
dt = t - midt
oot = np.where((abs(dt) > window) & (abs(dt) < window + 0.2 * p))[0]
if len(oot) <= 1:
continue
fn = f / np.median(f[oot])
select = np.where(abs(dt) < (window + 0.1 * p))[0] # select single transit
good = np.where(abs(dt) <= p / 2)[0] # all points belonging to current transit
if plots:
if cnt % 8 == 0:
plt.close('all')
fig, ax = plt.subplots(8, figsize=(6, 12), sharex=True)
if plotbad or (len(select) > 5):
ax[cnt % 8].plot(dt[select], fn[select], lw=0, marker='.')
ax[cnt % 8].axvline(x=0, color='k', ls='--')
ax[cnt % 8].set_xlabel('Time from midtransit (days)')
ax[cnt % 8].set_ylabel('Relative flux')
ax[cnt % 8].set_ylim(1 + depth - 0.0003, 1 + 0.0003)
ax[cnt % 8].set_xlim(-0.3, 0.3)
ax[cnt % 8].locator_params(axis='y', nbins=5)
ax[cnt % 8].get_yaxis().get_major_formatter().set_useOffset(False)
ax[cnt % 8].annotate(str(cnt), xy=(0.85, 0.1), xycoords='axes fraction', size=15)
dt_all += list(dt[good])
if len(select) > 5:
# fit sech to each transit
try:
fit = minimize(residual, params, args=(dt[select], fn[select], p, sech))
fiterr = np.sqrt(fit.covar[0][0])
err.append(fiterr)
midpts += len(good) * [fit.params['tc'].value + i * p + t0]
epochs += len(good) * [i]
if plots:
tc = fit.params['tc'].value
a0 = fit.params['a0'].value
a1 = fit.params['a1'].value
tarr = np.linspace(dt[select][0], dt[select][-1], 200)
if sech:
fmod = rect_sechmod(tarr, depth * 2, tc, w0 * p, a0, a1)
else:
fmod = model_transits.modeltransit([fit.params['tc'].value, fit.params['b'].value,
fit.params['Rs_a'].value, fit.params['Rp_Rs'].value, 1,
fit.params['gamma1'].value,
fit.params['gamma2'].value], model_transits.occultquad, p,
tarr)
fmod *= (fit.params['a0'].value + fit.params['a1'].value * tarr)
ax[cnt % 8].plot(tarr, fmod, color='r')
valid_trans.append(i)
except TypeError:
midpts += len(good) * [np.nan]
epochs += len(good) * [np.nan]
err.append(np.nan)
print 'Fit failed'
pass
else:
midpts += len(good) * [np.nan]
err.append(np.nan)
epochs += len(good) * [np.nan]
print 'Too few data points'
if plots and ((cnt % 8 == 7) or (i == end - 1)):
plt.savefig('outputs/' + name + 'alltrans' + str(ceil(cnt / 8. + 0.01)) + '.pdf', dpi=150,
bbox_inches='tight')
if plotbad or (len(select) > 5):
cnt += 1
print 'total transits:', cnt
epochs = np.array(epochs)
print 'good transits:', np.unique(epochs[np.where(~np.isnan(epochs))[0]])
return np.array(dt_all), epochs, np.array(midpts), np.array(err)
def make_folded_lc(dt, f, epochs, midpts, window, fig=None):
"""Returns dt, flux, epochs and midpoints belonging to data points within transit windows.
Makes plot of folded light curve if fig parameter is not None.
:param dt: nd array showing the time (days) to the nearest transit for each point. Output dt_all of plot_indiv_trans.
:param f: nd array of normalized fluxes.
:param epochs: nd array of epoch numbers associated with all points.
:param midpts: nd array of midtransit times associated with all points.
:param window: approximate length of transit window (days). Include at least half a transit's worth of out-of-transit
light curve on either side of dip.
:param fig: plot handle indicating desired plot dimensions. e.g. fig = plt.figure(figsize=(10,4)).
No plots will be made if set to None.
:return:
dt_tra - array of selected dt that fall within transit windows.
f_tra - array of selected fluxes that fall within transit windows.
epochs_tra - array of selected epochs that fall within transit windows.
midpts_tra - array of selected midtransit times that fall within transit windows.
fig - plot handle of folded transit.
"""
transwindow = np.where(abs(dt) < window * 2.2) # transit window size is hard to determine
dt_tra = dt[transwindow]
f_tra = f[transwindow]
epochs_tra = epochs[transwindow]
midpts_tra = midpts[transwindow]
oot = np.where(abs(dt_tra) > window)[0]
error = np.std(f_tra[oot])
if fig is not None:
plt.close('all')
# fig = plt.figure(figsize=(10, 4))
plt.plot(dt_tra, f_tra, lw=0, marker='.', color='b')
plt.axvline(x=0, ls='--', color='k')
plt.xlabel('t-tc (days)')
plt.ylabel('Relative flux')
# plt.savefig('outputs/' + name + '_folded.pdf', dpi=150)
order = sorted(range(len(dt_tra)), key=lambda k: dt_tra[k])
dt_tra = dt_tra[order]
f_tra = f_tra[order]
epochs_tra = epochs_tra[order]
midpts_tra = midpts_tra[order]
if fig is not None:
return dt_tra, f_tra, epochs_tra, midpts_tra, fig
else:
return dt_tra, f_tra, epochs_tra, midpts_tra
def get_fold_fit(dt_tra, f_tra, depth, period, window, fig=None):
"""Uses lmfit to get a good estimate of the Mandel-Agol parameters from the folded light curve. The curve fitting
routine will then be rerun using these better parameters.
:param dt_tra: array of selected dt that fall within transit windows.
:param f_tra: array of selected fluxes that fall within transit windows.
:param depth: estimate of transit depth obtained from sech fit. Defined as negative.
:param period: estimate of transit period (days).
:param window: approximate length of transit window (days). Include at least half a transit's worth of out-of-transit
light curve on either side of dip.
:param fig: plot handle indicating desired plot dimensions. e.g. fig = plt.figure(figsize=(10,4)).
No plots will be made if set to None.
:return:
fit - best-fit Mandel-Agol parameters from lmfit.minimise(). Contains the following params:
tc - midtransit time. Centred at 0.
b - impact parameter.
Rs_a - radius of star/semimajor axis.
F - out-of-transit flux, fixed at 1.
gamma1, gamma2 - quadratic limb darkening parameters from Mandel & Agol (2002)
fig - plot handle of folded transit with best-fit model.
"""
# plotting can always be skipped now unless you want to debug
params = Parameters()
params.add('tc', value=0, vary=False, min=-0.1, max=0.1)
params.add('b', value=0.7, vary=True)
params.add('Rs_a', value=0.1, vary=True, min=0., max=0.5)
params.add('Rp_Rs', value=(-depth) ** 0.5, vary=True)
params.add('F', value=1, vary=False)
params.add('gamma1', value=0.3, vary=True, min=0, max=0.5) # should I let these float?
params.add('gamma2', value=0.3, vary=True, min=0, max=0.5)
params.add('a0', value=1, vary=False)
params.add('a1', value=0, vary=False)
fit = minimize(residual, params, args=(dt_tra, f_tra, period, False))
tarr = np.linspace(min(dt_tra), max(dt_tra), 100)
fmod = model_transits.modeltransit([fit.params['tc'].value, fit.params['b'].value, fit.params['Rs_a'].value,
fit.params['Rp_Rs'].value, 1, fit.params['gamma1'].value,
fit.params['gamma2'].value], model_transits.occultquad, period, tarr)
if fig is not None:
plt.close('all')
plt.plot(dt_tra, f_tra, lw=0, marker='.')
plt.plot(tarr, fmod, color='r')
plt.axvline(x=-window, color='k', ls='--')
plt.axvline(x=window, color='k', ls='--')
plt.xlabel('Time from midtransit (days)')
plt.ylabel('Relative flux')
if fig is not None:
return fit, fig
else:
return fit
def get_oc(all_epochs, all_midpts, err, fig=None):
"""Calculates accurate values for ephemeris and period. Plots O-C diagram if desired.
:param all_epochs: nd array of epoch numbers associated with all points. From plot_indiv_trans.
:param all_midpts: nd array of midtransit times associated with all points. From plot_indiv_trans.
:param err: array of errors on midtransit times. One value for each unique time. From plot_indiv_trans.
:param fig: plot handle indicating desired plot dimensions. e.g. fig = plt.figure(figsize=(10,4)).
No plots will be made if set to None.
:return:
p_fit - best-fit transit period (days).
t0_fit - best-fit transit ephemeris.
fig - plot handle for O-C plot.
"""
try:
epochs = np.unique(all_epochs[np.where(~np.isnan(all_epochs))[0]])
midpts = np.unique(all_midpts[np.where(~np.isnan(all_midpts))[0]])
err = np.unique(err[np.where(~np.isnan(err))[0]])
except:
print('Error: invalid epochs and/or ephemerides')
raise
if len(epochs) > 2:
coeffs, cov = np.polyfit(epochs, midpts, 1, cov=True)
p_fit = coeffs[0]
p_err = np.sqrt(cov[0, 0])
t0_fit = coeffs[1]
t0_err = np.sqrt(cov[1, 1])
else:
p_fit = (midpts[1] - midpts[0]) / (epochs[1] - epochs[0])
p_err = 0
t0_fit = (midpts[1] * epochs[0] - midpts[0] * epochs[1]) / (epochs[0] - epochs[1])
t0_err = 0
print 'p=', p_fit, '+-', p_err
print 't0=', t0_fit, '+-', t0_err
if len(epochs) > 2:
fit = np.polyval(coeffs, epochs)
oc = (midpts - fit) * 24.
else:
oc = midpts * 0
err = np.array(err) * 24.
if fig is not None:
plt.close('all')
# fig = plt.figure(figsize=(9, 4))
plt.errorbar(epochs, oc, yerr=err, fmt='o')
plt.axhline(color='k', ls='--')
plt.ylabel('O-C (hours)')
plt.xlabel('Epochs')
plt.xlim(-0.1, max(epochs) + 1)
# plt.savefig('outputs/' + name + '_oc.pdf', dpi=150, bbox_inches='tight')
if fig is not None:
return p_fit, t0_fit, fig
else:
return p_fit, t0_fit
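# Worked illustration of the linear fit used above (illustrative only): when
# the midtransit times follow t0 + n*P exactly, the fitted slope is the
# period, the intercept is the ephemeris, and every O-C value is zero.
_demo_p, _demo_t0 = np.polyfit([0, 1, 2, 3], [100.0, 102.5, 105.0, 107.5], 1)
assert abs(_demo_p - 2.5) < 1e-9 and abs(_demo_t0 - 100.0) < 1e-9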
def odd_even(dt_tra, f_tra, epochs_tra, window, period, p0):
"""Plots odd vs. even transits and calculates difference in depth.
:param dt_tra: see get_fold_fit.
:param f_tra: see get_fold_fit.
:param epochs_tra: see get_fold_fit.
:param window: see get_fold_fit.
:param period: see get_fold_fit.
:param p0: good estimate of Mandel-Agol parameters from get_fold_fit. p0 = [b, Rs_a, Rp_Rs, gamma1, gamma2]
:return:
fig - plot handle for odd-even comparison plot.
"""
#
odd = np.where(epochs_tra % 2 != 0)[0]
even = np.where(epochs_tra % 2 == 0)[0]
params = Parameters()
params.add('tc', value=0, vary=False)
params.add('b', value=p0[0], vary=True)
params.add('Rs_a', value=p0[1], vary=True, min=0., max=0.5)
params.add('Rp_Rs', value=p0[2], vary=True)
params.add('F', value=1, vary=False)
params.add('gamma1', value=p0[3], vary=False)
params.add('gamma2', value=p0[4], vary=False)
params.add('a0', value=1, vary=False)
params.add('a1', value=0, vary=False)
fit_odd = minimize(residual, params, args=(dt_tra[odd], f_tra[odd], period, False))
fit_even = minimize(residual, params, args=(dt_tra[even], f_tra[even], period, False))
oot = np.where(abs(dt_tra) > window)[0]
    sigma = np.std(f_tra[oot])  # scatter of the out-of-transit flux
tarr = np.linspace(min(dt_tra), max(dt_tra), 200)
oddmod = model_transits.modeltransit([fit_odd.params['tc'].value, fit_odd.params['b'].value,
fit_odd.params['Rs_a'].value, fit_odd.params['Rp_Rs'].value, 1,
fit_odd.params['gamma1'].value,
fit_odd.params['gamma2'].value], model_transits.occultquad, period, tarr)
evenmod = model_transits.modeltransit([fit_even.params['tc'].value, fit_even.params['b'].value,
fit_even.params['Rs_a'].value, fit_even.params['Rp_Rs'].value, 1,
fit_even.params['gamma1'].value,
fit_even.params['gamma2'].value], model_transits.occultquad, period, tarr)
odd_depth = min(oddmod)
even_depth = min(evenmod)
diff = abs(odd_depth - even_depth) / sigma
plt.close('all')
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(13, 5))
plt.subplots_adjust(wspace=0, hspace=0)
ax1.plot(dt_tra[odd] * 24., f_tra[odd], lw=0, marker='.')
ax1.plot(tarr * 24., oddmod, color='r')
ax1.axhline(y=odd_depth, color='k', ls='--')
ax1.set_xlabel('Time from midtransit (hours)')
ax1.set_ylabel('Relative flux')
ax1.set_xlim(min(dt_tra) * 24, max(dt_tra) * 24)
ax1.annotate('Odd', xy=(0.75, 0.15), xycoords='axes fraction', size=15)
ax2.plot(dt_tra[even] * 24., f_tra[even], lw=0, marker='.')
ax2.plot(tarr * 24., evenmod, color='r')
ax2.axhline(y=even_depth, color='k', ls='--')
ax2.set_xlabel('Time from midtransit (hours)')
ax2.set_xlim(min(dt_tra) * 24, max(dt_tra) * 24)
ax2.annotate('Even', xy=(0.75, 0.15), xycoords='axes fraction', size=15)
ax2.annotate('Diff: %.3f sigma' % diff, xy=(0.62, 0.05), xycoords='axes fraction', size=15)
# plt.savefig('outputs/' + name + '_oddeven.pdf', dpi=150, bbox_inches='tight')
return fig
def occultation(dt, f, p):
"""Plots folded light curve between two transits to check for secondary eclipses.
:param dt: nd array showing the time (days) to the nearest transit for each point. Output dt_all of plot_indiv_trans.
:param f: nd array of light curve flux.
:param p: best-fit period (days).
:return:
fig - plot handle of secondary eclipse plot.
"""
phase = dt / p
phase[np.where(phase < 0)] += 1
occ = np.where((phase > 0.2) & (phase < 0.8))
ph_occ = phase[occ]
f_occ = f[occ]
tbins = np.linspace(0.2, 0.8, 51)
fbin = []
stddev = []
for i in range(0, 50):
inds = np.where((ph_occ >= tbins[i]) & (ph_occ < tbins[i + 1]))[0]
fbin.append(np.mean(f_occ[inds]))
stddev.append(np.std(f_occ[inds]))
tbins = tbins[0:-1] + 0.6 / 50.
plt.close('all')
fig = plt.figure(figsize=(9, 4))
plt.plot(ph_occ, f_occ, lw=0, marker='.', color='0.75')
plt.plot(tbins, fbin, lw=2, color='r')
plt.xlabel('Phase')
plt.ylabel('Relative flux')
plt.title('Occultation')
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
# plt.savefig('outputs/' + name + '_occult.pdf', dpi=150, bbox_inches='tight')
return fig
def allparams(name, dat):
# lists all best-fit params and prints to PDF
# heading = '#P (days) t0 (-2454900) b Rs_a Rp/Rs Rp Best-fit T'
plt.close('all')
fig = plt.figure(figsize=[7, 7])
plt.plot([0, 1], [0, 1], lw=0)
plt.axis('off')
plt.annotate('P = ' + str(dat[0]) + ' days', xy=(0.1, 0.9), xycoords='axes fraction', size=20)
plt.annotate('t0 (-2454900) = ' + str(dat[1]), xy=(0.1, 0.8), xycoords='axes fraction', size=20)
plt.annotate('b = ' + str(dat[2]), xy=(0.1, 0.7), xycoords='axes fraction', size=20)
plt.annotate('Rs/a = ' + str(dat[3]), xy=(0.1, 0.6), xycoords='axes fraction', size=20)
plt.annotate('Rp/Rs = ' + str(dat[4]), xy=(0.1, 0.5), xycoords='axes fraction', size=20)
plt.annotate('Rp = ' + str(dat[5]), xy=(0.1, 0.4), xycoords='axes fraction', size=20)
plt.annotate('T from SED = ' + str(dat[6]) + ' K', xy=(0.1, 0.3), xycoords='axes fraction', size=20)
plt.savefig('outputs/' + name + '_params.pdf', dpi=150)
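# ---------------------------------------------------------------------------
# Minimal sketch of how the routines above chain together (illustrative only:
# the target name, period and ephemeris below are hypothetical placeholders,
# a light-curve file '<name>.txt' is assumed to exist in `path`, and plotting
# is switched off so nothing is written to disk).
if __name__ == '__main__':
    name, p_guess, t0_guess = 'EPIC0000000', 3.21, 2100.5  # placeholders
    t, f, phase0, w, depth, lcfig = read_lc(name, p_guess, t0_guess, path='.')
    window = 2.5 * w * p_guess  # rough transit-window size in days
    dt, epochs, midpts, err = plot_indiv_trans(name, t, f, p_guess, t0_guess,
                                               window, [w, depth],
                                               plotbad=False, plots=False)
    p_fit, t0_fit = get_oc(epochs, midpts, err)
    dt_tra, f_tra, ep_tra, mid_tra = make_folded_lc(dt, f, epochs, midpts, window)
    fold_fit = get_fold_fit(dt_tra, f_tra, depth, p_fit, window)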
| mit |
libvirt/autotest | utils/external_packages.py | 1 | 27982 | # Please keep this code python 2.4 compatible and stand alone.
import logging, os, shutil, sys, tempfile, time, urllib2
import subprocess, re
from autotest_lib.client.common_lib import utils
_READ_SIZE = 64*1024
_MAX_PACKAGE_SIZE = 100*1024*1024
class Error(Exception):
"""Local exception to be raised by code in this file."""
class FetchError(Error):
"""Failed to fetch a package from any of its listed URLs."""
def _checksum_file(full_path):
"""@returns The hex checksum of a file given its pathname."""
inputfile = open(full_path, 'rb')
try:
hex_sum = utils.hash('sha1', inputfile.read()).hexdigest()
finally:
inputfile.close()
return hex_sum
def system(commandline):
"""Same as os.system(commandline) but logs the command first."""
logging.info(commandline)
return os.system(commandline)
def find_top_of_autotest_tree():
"""@returns The full path to the top of the autotest directory tree."""
dirname = os.path.dirname(__file__)
autotest_dir = os.path.abspath(os.path.join(dirname, '..'))
return autotest_dir
class ExternalPackage(object):
"""
Defines an external package with URLs to fetch its sources from and
a build_and_install() method to unpack it, build it and install it
beneath our own autotest/site-packages directory.
Base Class. Subclass this to define packages.
Attributes:
@attribute urls - A tuple of URLs to try fetching the package from.
@attribute local_filename - A local filename to use when saving the
fetched package.
@attribute hex_sum - The hex digest (currently SHA1) of this package
to be used to verify its contents.
@attribute module_name - The installed python module name to be used for
for a version check. Defaults to the lower case class name with
the word Package stripped off.
@attribute version - The desired minimum package version.
@attribute os_requirements - A dictionary mapping a file pathname on the
the OS distribution to a likely name of a package the user
needs to install on their system in order to get this file.
@attribute name - Read only, the printable name of the package.
@attribute subclasses - This class attribute holds a list of all defined
subclasses. It is constructed dynamically using the metaclass.
"""
subclasses = []
urls = ()
local_filename = None
hex_sum = None
module_name = None
version = None
os_requirements = None
class __metaclass__(type):
"""Any time a subclass is defined, add it to our list."""
def __init__(self, name, bases, d):
if name != 'ExternalPackage':
self.subclasses.append(self)
def __init__(self):
self.verified_package = ''
if not self.module_name:
self.module_name = self.name.lower()
self.installed_version = ''
@property
def name(self):
"""Return the class name with any trailing 'Package' stripped off."""
class_name = self.__class__.__name__
if class_name.endswith('Package'):
return class_name[:-len('Package')]
return class_name
def is_needed(self, unused_install_dir):
"""@returns True if self.module_name needs to be built and installed."""
if not self.module_name or not self.version:
logging.warning('version and module_name required for '
'is_needed() check to work.')
return True
try:
module = __import__(self.module_name)
except ImportError:
logging.info("%s isn't present. Will install.", self.module_name)
return True
self.installed_version = self._get_installed_version_from_module(module)
logging.info('imported %s version %s.', self.module_name,
self.installed_version)
if hasattr(self, 'minimum_version'):
return self.minimum_version > self.installed_version
else:
return self.version > self.installed_version
def _get_installed_version_from_module(self, module):
"""Ask our module its version string and return it or '' if unknown."""
try:
return module.__version__
except AttributeError:
logging.error('could not get version from %s', module)
return ''
def _build_and_install(self, install_dir):
"""Subclasses MUST provide their own implementation."""
raise NotImplementedError
def _build_and_install_current_dir(self, install_dir):
"""
Subclasses that use _build_and_install_from_package() MUST provide
their own implementation of this method.
"""
raise NotImplementedError
def build_and_install(self, install_dir):
"""
Builds and installs the package. It must have been fetched already.
@param install_dir - The package installation directory. If it does
not exist it will be created.
"""
if not self.verified_package:
raise Error('Must call fetch() first. - %s' % self.name)
self._check_os_requirements()
return self._build_and_install(install_dir)
def _check_os_requirements(self):
if not self.os_requirements:
return
failed = False
for file_name, package_name in self.os_requirements.iteritems():
if not os.path.exists(file_name):
failed = True
logging.error('File %s not found, %s needs it.',
file_name, self.name)
logging.error('Perhaps you need to install something similar '
'to the %s package for OS first.', package_name)
if failed:
raise Error('Missing OS requirements for %s. (see above)' %
self.name)
def _build_and_install_current_dir_setup_py(self, install_dir):
"""For use as a _build_and_install_current_dir implementation."""
egg_path = self._build_egg_using_setup_py(setup_py='setup.py')
if not egg_path:
return False
return self._install_from_egg(install_dir, egg_path)
def _build_and_install_current_dir_setupegg_py(self, install_dir):
"""For use as a _build_and_install_current_dir implementation."""
egg_path = self._build_egg_using_setup_py(setup_py='setupegg.py')
if not egg_path:
return False
return self._install_from_egg(install_dir, egg_path)
def _build_and_install_current_dir_noegg(self, install_dir):
if not self._build_using_setup_py():
return False
return self._install_using_setup_py_and_rsync(install_dir)
def _build_and_install_from_package(self, install_dir):
"""
This method may be used as a _build_and_install() implementation
for subclasses if they implement _build_and_install_current_dir().
Extracts the .tar.gz file, chdirs into the extracted directory
(which is assumed to match the tar filename) and calls
        _build_and_install_current_dir from there.
Afterwards the build (regardless of failure) extracted .tar.gz
directory is cleaned up.
@returns True on success, False otherwise.
@raises OSError If the expected extraction directory does not exist.
"""
self._extract_compressed_package()
if self.verified_package.endswith('.tar.gz'):
extension = '.tar.gz'
elif self.verified_package.endswith('.tar.bz2'):
extension = '.tar.bz2'
elif self.verified_package.endswith('.zip'):
extension = '.zip'
else:
raise Error('Unexpected package file extension on %s' %
self.verified_package)
os.chdir(os.path.dirname(self.verified_package))
os.chdir(self.local_filename[:-len(extension)])
extracted_dir = os.getcwd()
try:
return self._build_and_install_current_dir(install_dir)
finally:
os.chdir(os.path.join(extracted_dir, '..'))
shutil.rmtree(extracted_dir)
def _extract_compressed_package(self):
"""Extract the fetched compressed .tar or .zip within its directory."""
if not self.verified_package:
raise Error('Package must have been fetched first.')
os.chdir(os.path.dirname(self.verified_package))
if self.verified_package.endswith('gz'):
status = system("tar -xzf '%s'" % self.verified_package)
elif self.verified_package.endswith('bz2'):
status = system("tar -xjf '%s'" % self.verified_package)
elif self.verified_package.endswith('zip'):
status = system("unzip '%s'" % self.verified_package)
else:
raise Error('Unknown compression suffix on %s.' %
self.verified_package)
if status:
            raise Error('extraction failed with %s' % (status,))
def _build_using_setup_py(self, setup_py='setup.py'):
"""
Assuming the cwd is the extracted python package, execute a simple
python setup.py build.
@param setup_py - The name of the setup.py file to execute.
@returns True on success, False otherwise.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
status = system("'%s' %s build" % (sys.executable, setup_py))
if status:
logging.error('%s build failed.' % self.name)
return False
return True
def _build_egg_using_setup_py(self, setup_py='setup.py'):
"""
Assuming the cwd is the extracted python package, execute a simple
python setup.py bdist_egg.
@param setup_py - The name of the setup.py file to execute.
@returns The relative path to the resulting egg file or '' on failure.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
egg_subdir = 'dist'
if os.path.isdir(egg_subdir):
shutil.rmtree(egg_subdir)
status = system("'%s' %s bdist_egg" % (sys.executable, setup_py))
if status:
            logging.error('bdist_egg of %s failed.', self.name)
return ''
# I've never seen a bdist_egg lay multiple .egg files.
for filename in os.listdir(egg_subdir):
if filename.endswith('.egg'):
return os.path.join(egg_subdir, filename)
def _install_from_egg(self, install_dir, egg_path):
"""
Install a module from an egg file by unzipping the necessary parts
into install_dir.
@param install_dir - The installation directory.
@param egg_path - The pathname of the egg file.
"""
status = system("unzip -q -o -d '%s' '%s'" % (install_dir, egg_path))
if status:
logging.error('unzip of %s failed', egg_path)
return False
egg_info = os.path.join(install_dir, 'EGG-INFO')
if os.path.isdir(egg_info):
shutil.rmtree(egg_info)
return True
def _get_temp_dir(self):
return tempfile.mkdtemp(dir='/var/tmp')
def _site_packages_path(self, temp_dir):
# This makes assumptions about what python setup.py install
# does when given a prefix. Is this always correct?
python_xy = 'python%s' % sys.version[:3]
return os.path.join(temp_dir, 'lib', python_xy, 'site-packages')
def _install_using_setup_py_and_rsync(self, install_dir,
setup_py='setup.py',
temp_dir=None):
"""
Assuming the cwd is the extracted python package, execute a simple:
python setup.py install --prefix=BLA
BLA will be a temporary directory that everything installed will
be picked out of and rsynced to the appropriate place under
install_dir afterwards.
Afterwards, it deconstructs the extra lib/pythonX.Y/site-packages/
directory tree that setuptools created and moves all installed
site-packages directly up into install_dir itself.
@param install_dir the directory for the install to happen under.
@param setup_py - The name of the setup.py file to execute.
@returns True on success, False otherwise.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
if temp_dir is None:
temp_dir = self._get_temp_dir()
try:
status = system("'%s' %s install --no-compile --prefix='%s'"
% (sys.executable, setup_py, temp_dir))
if status:
logging.error('%s install failed.' % self.name)
return False
if os.path.isdir(os.path.join(temp_dir, 'lib')):
# NOTE: This ignores anything outside of the lib/ dir that
# was installed.
temp_site_dir = self._site_packages_path(temp_dir)
else:
temp_site_dir = temp_dir
status = system("rsync -r '%s/' '%s/'" %
(temp_site_dir, install_dir))
if status:
logging.error('%s rsync to install_dir failed.' % self.name)
return False
return True
finally:
shutil.rmtree(temp_dir)
def _build_using_make(self, install_dir):
"""Build the current package using configure/make.
@returns True on success, False otherwise.
"""
install_prefix = os.path.join(install_dir, 'usr', 'local')
status = system('./configure --prefix=%s' % install_prefix)
if status:
logging.error('./configure failed for %s', self.name)
return False
status = system('make')
if status:
logging.error('make failed for %s', self.name)
return False
status = system('make check')
if status:
logging.error('make check failed for %s', self.name)
return False
return True
def _install_using_make(self):
"""Install the current package using make install.
Assumes the install path was set up while running ./configure (in
_build_using_make()).
@returns True on success, False otherwise.
"""
status = system('make install')
return status == 0
def fetch(self, dest_dir):
"""
Fetch the package from one its URLs and save it in dest_dir.
        If the package already exists in dest_dir and the checksum
        matches, this code will not fetch it again.
Sets the 'verified_package' attribute with the destination pathname.
@param dest_dir - The destination directory to save the local file.
If it does not exist it will be created.
        @returns A boolean indicating if the package is now in dest_dir.
@raises FetchError - When something unexpected happens.
"""
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
local_path = os.path.join(dest_dir, self.local_filename)
# If the package exists, verify its checksum and be happy if it is good.
if os.path.exists(local_path):
actual_hex_sum = _checksum_file(local_path)
if self.hex_sum == actual_hex_sum:
logging.info('Good checksum for existing %s package.',
self.name)
self.verified_package = local_path
return True
logging.warning('Bad checksum for existing %s package. '
'Re-downloading', self.name)
os.rename(local_path, local_path + '.wrong-checksum')
# Download the package from one of its urls, rejecting any if the
# checksum does not match.
for url in self.urls:
logging.info('Fetching %s', url)
try:
url_file = urllib2.urlopen(url)
except (urllib2.URLError, EnvironmentError):
logging.warning('Could not fetch %s package from %s.',
self.name, url)
continue
data_length = int(url_file.info().get('Content-Length',
_MAX_PACKAGE_SIZE))
if data_length <= 0 or data_length > _MAX_PACKAGE_SIZE:
raise FetchError('%s from %s fails Content-Length %d '
'sanity check.' % (self.name, url,
data_length))
checksum = utils.hash('sha1')
total_read = 0
output = open(local_path, 'wb')
try:
while total_read < data_length:
data = url_file.read(_READ_SIZE)
if not data:
break
output.write(data)
checksum.update(data)
total_read += len(data)
finally:
output.close()
if self.hex_sum != checksum.hexdigest():
logging.warning('Bad checksum for %s fetched from %s.',
self.name, url)
logging.warning('Got %s', checksum.hexdigest())
logging.warning('Expected %s', self.hex_sum)
os.unlink(local_path)
continue
logging.info('Good checksum.')
self.verified_package = local_path
return True
else:
return False
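# Illustrative sketch of what a new package definition looks like. All of the
# values below (class name, URL, version, checksum) are hypothetical
# placeholders, which is why the sketch stays commented out: merely defining
# a subclass registers it for installation via the metaclass above.
#
# class FictionalToolPackage(ExternalPackage):
#     version = '1.0.0'
#     local_filename = 'fictionaltool-%s.tar.gz' % version
#     urls = ('http://example.invalid/' + local_filename,)
#     hex_sum = '<sha1 of the verified tarball goes here>'
#
#     _build_and_install = ExternalPackage._build_and_install_from_package
#     _build_and_install_current_dir = (
#         ExternalPackage._build_and_install_current_dir_setup_py)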
# NOTE: This class definition must come -before- all other ExternalPackage
# classes that need to use this version of setuptools so that is is inserted
# into the ExternalPackage.subclasses list before them.
class SetuptoolsPackage(ExternalPackage):
# For all known setuptools releases a string compare works for the
# version string. Hopefully they never release a 0.10. (Their own
# version comparison code would break if they did.)
# Any system with setuptools > 0.6 is fine. If none installed, then
# try to install the latest found on the upstream.
minimum_version = '0.6'
version = '0.6c11'
urls = ('http://pypi.python.org/packages/source/s/setuptools/'
'setuptools-%s.tar.gz' % (version,),)
local_filename = 'setuptools-%s.tar.gz' % version
hex_sum = '8d1ad6384d358c547c50c60f1bfdb3362c6c4a7d'
SUDO_SLEEP_DELAY = 15
def _build_and_install(self, install_dir):
"""Install setuptools on the system."""
logging.info('NOTE: setuptools install does not use install_dir.')
return self._build_and_install_from_package(install_dir)
def _build_and_install_current_dir(self, install_dir):
egg_path = self._build_egg_using_setup_py()
if not egg_path:
return False
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'
print 'About to run sudo to install setuptools', self.version
print 'on your system for use by', sys.executable, '\n'
print '!! ^C within', self.SUDO_SLEEP_DELAY, 'seconds to abort.\n'
time.sleep(self.SUDO_SLEEP_DELAY)
# Copy the egg to the local filesystem /var/tmp so that root can
# access it properly (avoid NFS squashroot issues).
temp_dir = self._get_temp_dir()
try:
shutil.copy(egg_path, temp_dir)
egg_name = os.path.split(egg_path)[1]
temp_egg = os.path.join(temp_dir, egg_name)
p = subprocess.Popen(['sudo', '/bin/sh', temp_egg],
stdout=subprocess.PIPE)
regex = re.compile('Copying (.*?) to (.*?)\n')
match = regex.search(p.communicate()[0])
status = p.wait()
if match:
compiled = os.path.join(match.group(2), match.group(1))
os.system("sudo chmod a+r '%s'" % compiled)
finally:
shutil.rmtree(temp_dir)
if status:
logging.error('install of setuptools from egg failed.')
return False
return True
class MySQLdbPackage(ExternalPackage):
module_name = 'MySQLdb'
version = '1.2.2'
urls = ('http://downloads.sourceforge.net/project/mysql-python/'
'mysql-python/%(version)s/MySQL-python-%(version)s.tar.gz'
% dict(version=version),)
local_filename = 'MySQL-python-%s.tar.gz' % version
hex_sum = '945a04773f30091ad81743f9eb0329a3ee3de383'
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setup_py)
def _build_and_install(self, install_dir):
if not os.path.exists('/usr/bin/mysql_config'):
logging.error('You need to install /usr/bin/mysql_config')
logging.error('On Ubuntu or Debian based systems use this: '
'sudo apt-get install libmysqlclient15-dev')
return False
return self._build_and_install_from_package(install_dir)
class DjangoPackage(ExternalPackage):
version = '1.3'
local_filename = 'Django-%s.tar.gz' % version
urls = ('http://www.djangoproject.com/download/%s/tarball/' % version,)
hex_sum = 'f8814d5e1412bb932318db5130260da5bf053ff7'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
def _get_installed_version_from_module(self, module):
try:
return module.get_version().split()[0]
except AttributeError:
return '0.9.6'
class NumpyPackage(ExternalPackage):
version = '1.2.1'
local_filename = 'numpy-%s.tar.gz' % version
urls = ('http://downloads.sourceforge.net/project/numpy/NumPy/%(version)s/'
'numpy-%(version)s.tar.gz' % dict(version=version),)
hex_sum = '1aa706e733aea18eaffa70d93c0105718acb66c5'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setupegg_py)
# This requires numpy so it must be declared after numpy to guarantee that it
# is already installed.
class MatplotlibPackage(ExternalPackage):
version = '0.98.5.3'
short_version = '0.98.5'
local_filename = 'matplotlib-%s.tar.gz' % version
urls = ('http://downloads.sourceforge.net/project/matplotlib/matplotlib/'
'matplotlib-%s/matplotlib-%s.tar.gz' % (short_version, version),)
hex_sum = '2f6c894cf407192b3b60351bcc6468c0385d47b6'
os_requirements = {'/usr/include/ft2build.h': 'libfreetype6-dev',
'/usr/include/png.h': 'libpng12-dev'}
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setupegg_py)
class AtForkPackage(ExternalPackage):
version = '0.1.2'
local_filename = 'atfork-%s.zip' % version
urls = ('http://python-atfork.googlecode.com/files/' + local_filename,)
hex_sum = '5baa64c73e966b57fa797040585c760c502dc70b'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
class ParamikoPackage(ExternalPackage):
version = '1.7.5'
local_filename = 'paramiko-%s.tar.gz' % version
urls = ('http://www.lag.net/paramiko/download/' + local_filename,
'ftp://mirrors.kernel.org/gentoo/distfiles/' + local_filename,)
hex_sum = '592be7a08290070b71da63a8e6f28a803399e5c5'
_build_and_install = ExternalPackage._build_and_install_from_package
def _check_for_pycrypto(self):
# NOTE(gps): Linux distros have better python-crypto packages than we
        # can easily get today via a wget due to the library's age and staleness,
# yet many security and behavior bugs are fixed by patches that distros
# already apply. PyCrypto has a new active maintainer in 2009. Once a
# new release is made (http://pycrypto.org/) we should add an installer.
try:
import Crypto
except ImportError:
logging.error('Please run "sudo apt-get install python-crypto" '
'or your Linux distro\'s equivalent.')
return False
return True
def _build_and_install_current_dir(self, install_dir):
if not self._check_for_pycrypto():
return False
        # paramiko doesn't require building; it is just a module directory
# that we can rsync into place directly.
if not os.path.isdir('paramiko'):
raise Error('no paramiko directory in %s.' % os.getcwd())
status = system("rsync -r 'paramiko' '%s/'" % install_dir)
if status:
logging.error('%s rsync to install_dir failed.' % self.name)
return False
return True
class SimplejsonPackage(ExternalPackage):
version = '2.0.9'
local_filename = 'simplejson-%s.tar.gz' % version
urls = ('http://pypi.python.org/packages/source/s/simplejson/' +
local_filename,)
hex_sum = 'b5b26059adbe677b06c299bed30557fcb0c7df8c'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setup_py)
class Httplib2Package(ExternalPackage):
version = '0.6.0'
local_filename = 'httplib2-%s.tar.gz' % version
urls = ('http://httplib2.googlecode.com/files/' + local_filename,)
hex_sum = '995344b2704826cc0d61a266e995b328d92445a5'
def _get_installed_version_from_module(self, module):
# httplib2 doesn't contain a proper version
return self.version
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
class GwtPackage(ExternalPackage):
"""Fetch and extract a local copy of GWT used to build the frontend."""
version = '2.4.0'
local_filename = 'gwt-%s.zip' % version
urls = ('http://google-web-toolkit.googlecode.com/files/' + local_filename,)
hex_sum = 'a91ac20db0ddd5994ac3cbfb0e8061d5bbf66f88'
name = 'gwt'
about_filename = 'about.txt'
module_name = None # Not a Python module.
def is_needed(self, install_dir):
gwt_dir = os.path.join(install_dir, self.name)
about_file = os.path.join(install_dir, self.name, self.about_filename)
if not os.path.exists(gwt_dir) or not os.path.exists(about_file):
logging.info('gwt not installed for autotest')
return True
f = open(about_file, 'r')
version_line = f.readline()
f.close()
match = re.match(r'Google Web Toolkit (.*)', version_line)
if not match:
logging.info('did not find gwt version')
return True
logging.info('found gwt version %s', match.group(1))
return match.group(1) != self.version
def _build_and_install(self, install_dir):
if not os.path.isdir(install_dir):
os.makedirs(install_dir)
os.chdir(install_dir)
self._extract_compressed_package()
extracted_dir = self.local_filename[:-len('.zip')]
target_dir = os.path.join(install_dir, self.name)
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
os.rename(extracted_dir, target_dir)
return True
| gpl-2.0 |
sindresf/The-Playground | Python/Machine Learning/SummerFeast/SummerChallengeScorer/SummerChallengeScorer/data_reworker_for_model.py | 1 | 6844 | import numpy as np
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
import math
from keras.models import Sequential
from keras.models import load_model
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
#DEFINE STARTING VALUES
range_start = 60
range_end = 1200
step = 15
data_range = np.arange(range_start,range_end,step)
rand_seed = 21
np.random.seed(rand_seed)
look_back = 5
csv_path = 'W:\Datasets\synth_scoring\lines.csv'
lines = np.loadtxt(csv_path)
alco = np.array([line[0] for line in lines])
lines = np.asarray([np.array(line[1:], dtype='float32') for line in lines])
val_scaler = MinMaxScaler(feature_range=(0.0,1.0))
time_scaler = MinMaxScaler(feature_range=(0.0,1.0))
all_vals = np.array([],dtype='float32')
for line in lines:
all_vals = np.append(all_vals,line)
all_vals_scaled = val_scaler.fit_transform(all_vals.reshape(-1, 1))
data_range_scaled = time_scaler.fit_transform(data_range.reshape(-1, 1))
line_index_jump = 0
for i in range(0,len(lines)):
for j in range(0,len(lines[i])):
lines[i,j] = all_vals_scaled[j + line_index_jump]
line_index_jump += len(lines[i])
def get_line_split(line,alco=1.0, look_back=1):
line_look_back, line_pred = [],[]
for i in range(1,len(line) - look_back - 2):
xb = np.array(line[i:(i + look_back + 1)])
xa = [np.array([v], dtype='float32') for v in xb]
yb = line[i + look_back]
for x in range(i, i + look_back + 1):
if(x == 1):
xa[x - i] = np.array([line[0], data_range_scaled[1], alco], dtype='float32')
else:
xa[x - i] = np.array([xb[x - i - 1],data_range_scaled[x], alco], dtype='float32')
line_look_back.append(np.array(xa))
line_pred.append(yb)
return np.array(line_look_back), np.array(line_pred)
def create_lines_splits(lines, alcos, look_back=1):
lines_look_back, lines_pred = [],[]
alco_count = 0
for line in lines:
lb,pred = get_line_split(line,alcos[alco_count],look_back)
lines_look_back.append(lb)
lines_pred.append(pred)
alco_count += 1
return np.array(lines_look_back), np.array(lines_pred)
lines_look_back, lines_pred = create_lines_splits(lines,alco,look_back)
def create_train_test_split(look_backs, preds, train_size=0.67):
train_LB, train_pred, test_LB, test_pred = [],[],[],[]
split_index = int(len(look_backs) * train_size)
train_LB = look_backs[:split_index]
train_pred = preds[:split_index]
test_LB = look_backs[split_index:]
test_pred = preds[split_index:]
return np.array(train_LB),np.array(train_pred),np.array(test_LB), np.array(test_pred)
train_LB, train_pred, test_LB, test_pred = create_train_test_split(lines_look_back,lines_pred)
def get_LSTM_line_structure(line):
    # Note: np.reshape reinterprets the flattened (look_back + 1, 3) windows as
    # (3, look_back + 1) blocks; it is not an axis transpose (np.transpose(line,
    # (0, 2, 1)) would keep each channel row as a single quantity).
    return np.reshape(line, (line.shape[0], line.shape[2], line.shape[1]))
def get_all_LSTM_line_structures(lines):
lines_reshaped = []
for line in lines:
ln = get_LSTM_line_structure(line)
lines_reshaped.append(ln)
return np.array(lines_reshaped)
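# Illustrative shape check (not part of the original script): each window
# built by get_line_split is (look_back + 1, 3) -- one (value, time, alcohol)
# triple per step -- and the LSTM below is fed batches shaped
# (batch, 3, look_back + 1).
_demo_windows = np.zeros((7, look_back + 1, 3), dtype='float32')  # 7 dummy windows
print(get_LSTM_line_structure(_demo_windows).shape)  # -> (7, 3, 6) for look_back = 5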
train_LB_X = get_all_LSTM_line_structures(train_LB)
test_LB_X = get_all_LSTM_line_structures(test_LB)
def train_on_one_line(model, line, line_pred, epochs):
    model.fit(line, line_pred, epochs=epochs, batch_size=batch_size, verbose=2)
def train_on_some_lines(model, lines, lines_preds, line_numbers=[], epochs=[]):
epoch_index = 0
for index in line_numbers:
print('training on line no ' + str(index))
train_on_one_line(model, lines[index], lines_preds[index], epochs[epoch_index])
epoch_index = (epoch_index + 1) % len(epochs)
print()
def train_on_all_lines(model, lines, lines_preds, epochs):
for line,pred in zip(lines,lines_preds):
train_on_one_line(model,line,pred,epochs)
# Define and train the LSTM model
batch_size = 17
model = Sequential()
model.add(LSTM(100, stateful=True, return_sequences=True, input_shape=(3, look_back + 1), recurrent_activation="tanh", batch_size=batch_size))
model.add(Activation("tanh"))
model.add(Dropout(0.15))
model.add(LSTM(110, stateful=True, return_sequences=True, recurrent_activation="tanh", batch_size=batch_size))
model.add(Activation("tanh"))
model.add(Dropout(0.15))
model.add(LSTM(80, stateful=True, return_sequences=False, recurrent_activation="tanh", batch_size=batch_size))
model.add(Activation("tanh"))
model.add(Dropout(0.15))
model.add(Dense(1))
model.add(Activation("relu"))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
train_on_one_line(model,train_LB_X[214],train_pred[214],epochs=120)
train_on_one_line(model,train_LB_X[212],train_pred[212],epochs=120)
train_on_one_line(model,train_LB_X[9],train_pred[9],epochs=80)
train_on_all_lines(model,train_LB_X[3:30],train_pred[3:30],30)
train_on_all_lines(model,train_LB_X,train_pred,17)
train_on_some_lines(model,train_LB_X,train_pred,np.random.randint(0,len(train_LB_X),50),epochs=np.random.randint(15,35,15))
print('saving model as h5')
model_path = 'W:\Datasets\synth_scoring\model2.h5'
model.save(model_path)
print()
print('scoring!')
print()
# make predictions
trainPredict214 = model.predict(train_LB_X[214],batch_size = batch_size)
trainPredict165 = model.predict(train_LB_X[165],batch_size = batch_size)
trainPredict9 = model.predict(train_LB_X[9],batch_size = batch_size)
trainPredict280 = model.predict(train_LB_X[280],batch_size = batch_size)
testPredict49 = model.predict(test_LB_X[49],batch_size = batch_size)
# calculate root mean squared error
#trainPredict = scaler.inverse_transform(trainPredict)
#train_pred = scaler.inverse_transform([train_pred])
#testPredict = scaler.inverse_transform(testPredict)
#test_pred = scaler.inverse_transform([test_pred])
trainScore = math.sqrt(mean_squared_error(train_pred[214], trainPredict214))
trainScore += math.sqrt(mean_squared_error(train_pred[165], trainPredict165))
trainScore += math.sqrt(mean_squared_error(train_pred[9], trainPredict9))
trainScore += math.sqrt(mean_squared_error(train_pred[280], trainPredict280))
trainScore /= 4.0
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(test_pred[49], testPredict49))
print('Test Score: %.2f RMSE' % (testScore))
print()
print('plotting prediction')
#plt.plot(train_LB_X_plot_vals,color='green', label='training data',
#linewidth=5)
plt.plot(trainPredict214,color = 'blue', label = '214')
plt.plot(trainPredict165,color = 'gray', label = '165')
plt.plot(trainPredict9,color = 'green', label = '9')
plt.plot(trainPredict280,color = 'gray', linewidth=3)
plt.plot(trainPredict280,color = 'yellow', label = '280')
#plt.plot(train_LB_X_plot_vals,color='orange', label='test data', linewidth=3)
plt.plot(testPredict49,color='red', label='test predictions')
plt.legend(loc = 'upper right')
plt.show() | mit |
paris-saclay-cds/ramp-workflow | rampwf/tests/kits/boston_housing/problem.py | 1 | 1048 | import os
import pandas as pd
from sklearn.model_selection import ShuffleSplit
import rampwf as rw
problem_title = 'Boston housing price regression'
_target_column_name = 'medv'
# A type (class) which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_regression()
# An object implementing the workflow
workflow = rw.workflows.Estimator()
score_types = [
rw.score_types.RMSE(),
rw.score_types.RelativeRMSE(name='rel_rmse'),
rw.score_types.NormalizedRMSE(name='n_rmse'),
]
def get_cv(X, y):
cv = ShuffleSplit(n_splits=2, test_size=0.2, random_state=57)
return cv.split(X)
def _read_data(path, f_name):
data = pd.read_csv(os.path.join(path, 'data', f_name))
y_array = data[_target_column_name].values
X_array = data.drop([_target_column_name], axis=1).values
return X_array, y_array
def get_train_data(path='.'):
f_name = 'train.csv'
return _read_data(path, f_name)
def get_test_data(path='.'):
f_name = 'test.csv'
return _read_data(path, f_name)
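# Minimal local sanity check of this problem definition (illustrative only: it
# assumes data/train.csv exists relative to the working directory and uses an
# arbitrary sklearn regressor in place of a real RAMP submission).
if __name__ == '__main__':
    import numpy as np
    from sklearn.linear_model import LinearRegression
    X_train, y_train = get_train_data()
    for fold, (train_idx, test_idx) in enumerate(get_cv(X_train, y_train)):
        reg = LinearRegression().fit(X_train[train_idx], y_train[train_idx])
        y_pred = reg.predict(X_train[test_idx])
        rmse = np.sqrt(np.mean((y_train[test_idx] - y_pred) ** 2))
        print('fold %d RMSE: %.3f' % (fold, rmse))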
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
Vimos/scikit-learn | sklearn/neighbors/graph.py | 36 | 6650 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self : bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self : bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
Alex-Ian-Hamilton/sunpy | sunpy/map/sources/tests/test_eit_source.py | 1 | 1215 | """Test cases for SOHO Map subclasses.
This particular test file pertains to EITMap.
@Author: Pritish C. (VaticanCameos)
"""
import os
import glob
import numpy as np
from matplotlib import colors
import pytest
from sunpy.map.sources.soho import EITMap
from sunpy.map import Map
import sunpy.data.test
path = sunpy.data.test.rootdir
fitslist = glob.glob(os.path.join(path, "EIT", "*"))
@pytest.fixture(scope="module", params=fitslist)
def createEIT(request):
"""Creates an EITMap from a FITS file."""
return Map(request.param)
# EIT Tests
def test_fitstoEIT(createEIT):
"""Tests the creation of EITMap using FITS."""
assert isinstance(createEIT, EITMap)
def test_is_datasource_for(createEIT):
"""Test the is_datasource_for method of EITMap.
Note that header data to be provided as an argument
can be a MapMeta object."""
assert createEIT.is_datasource_for(createEIT.data, createEIT.meta)
def test_observatory(createEIT):
"""Tests the observatory property of the EITMap object."""
assert createEIT.observatory == "SOHO"
def test_measurement(createEIT):
"""Tests the measurement property of the EITMap object."""
assert createEIT.measurement.value in [195, 171] | bsd-2-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/computation/pytables.py | 9 | 20208 | """ manage PyTables query interface via Expressions """
import ast
import time
import warnings
from functools import partial
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.compat import u, string_types, PY3, DeepChainMap
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.computation import expr, ops
from pandas.computation.ops import is_term, UndefinedVariableError
from pandas.computation.scope import _ensure_scope
from pandas.computation.expr import BaseExprVisitor
from pandas.computation.common import _ensure_decoded
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {0!r} is not defined'.format(self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not com.is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs),'kind',None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs),'meta',None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs),'metadata',None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "(%s %s %s)" % (self.lhs, self.op, val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(com.pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = com.pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif (isinstance(v, datetime) or hasattr(v, 'timetuple') or
kind == u('date')):
v = time.mktime(v.timetuple())
return TermValue(v, pd.Timestamp(v), kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com._values_from_object(self.metadata)
result = metadata.searchsorted(v,side='left')
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif not isinstance(v, string_types):
v = stringify(v)
return TermValue(v, stringify(v), u('string'))
# string quoting
return TermValue(v, stringify(v), u('string'))
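    # Editorial note: e.g. for a column whose kind is 'datetime64', convert_value
    # parses the value into a Timestamp and returns a TermValue whose .converted is
    # the Timestamp's integer nanosecond value, which is what generate() embeds in
    # the numexpr condition string.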
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return com.pprint_thing("[Filter : [{0}] -> "
"[{1}]".format(self.filter[0], self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError(
"passing a filterable condition to a non-table indexer [%s]" %
self)
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return com.pprint_thing("[Condition : [{0}]]".format(self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "(%s %s %s)" % (
self.lhs.condition,
self.op,
self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {0!r} with "
"{1!r}".format(value, slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
                # something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, op=None, value=None, queryables=None,
encoding=None, scope_level=0):
# try to be back compat
where = self.parse_back_compat(where, op, value)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = self.parse_back_compat(w)
where[idx] = w
where = ' & ' .join(["(%s)" % w for w in where])
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def parse_back_compat(self, w, op=None, value=None):
""" allow backward compatibility for passed arguments """
if isinstance(w, dict):
w, op, value = w.get('field'), w.get('op'), w.get('value')
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
warnings.warn("passing a dict to Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning)
if isinstance(w, tuple):
if len(w) == 2:
w, value = w
op = '=='
elif len(w) == 3:
w, op, value = w
warnings.warn("passing a tuple into Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning, stacklevel=10)
if op is not None:
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
if isinstance(op, Expr):
raise TypeError("invalid op passed, must be a string")
w = "{0}{1}".format(w, op)
if value is not None:
if isinstance(value, Expr):
raise TypeError("invalid value passed, must be a string")
            # stringify these values, wrapping them in quotes
def convert(v):
if isinstance(v, (datetime,np.datetime64,timedelta,np.timedelta64)) or hasattr(v, 'timetuple'):
return "'{0}'".format(v)
return v
if isinstance(value, (list,tuple)):
value = [ convert(v) for v in value ]
else:
value = convert(value)
w = "{0}{1}".format(w, value)
warnings.warn("passing multiple values to Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning)
return w
def __unicode__(self):
if self.terms is not None:
return com.pprint_thing(self.terms)
return com.pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid condition".format(self.expr, self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid filter".format(self.expr, self))
return self.condition, self.filter
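# Editorial sketch (assumed usage, normally driven by HDFStore.select): given a
# table's queryables mapping (column name -> kind), an Expr is built from a `where`
# string and evaluated into a numexpr condition plus an optional post-read filter:
#   expr = Expr("index > 5 & columns == 'A'", queryables=table_queryables)
#   condition, data_filter = expr.evaluate()
# `table_queryables` is a placeholder here for whatever the concrete table provides.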
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u('string'):
if encoding is not None:
return self.converted
return '"%s"' % self.converted
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
| mit |
NeCTAR-RC/karaage-user | kguser/conf/test_settings.py | 1 | 3497 | # Django settings for grunt project.
from os import uname
AJAX_LOOKUP_CHANNELS = {
'person' : ( 'karaage.people.lookups', 'PersonLookup'),
'group' : ( 'karaage.people.lookups', 'GroupLookup'),
'project' : ( 'karaage.projects.lookups', 'ProjectLookup'),
}
class InvalidString(str):
def __mod__(self, other):
from django.template.base import TemplateSyntaxError
raise TemplateSyntaxError(
"Undefined variable or unknown value for: \"%s\"" % other)
TEMPLATE_STRING_IF_INVALID = InvalidString("%s")
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SHIB_SUPPORTED = False
GRAPH_DEBUG = True
SOUTH_TESTS_MIGRATE = False
# DATASTORES
MACHINE_CATEGORY_DATASTORES = {
'default' : [
{
'DESCRIPTION': 'Default LDAP datastore',
'ENGINE': 'kgkeystone.datastore.ldap.MachineCategoryDataStore',
'LDAP': 'default',
'ACCOUNT': 'karaage.datastores.ldap_schemas.openldap_account',
'GROUP': 'karaage.datastores.ldap_schemas.openldap_account_group',
'PRIMARY_GROUP': "institute",
'DEFAULT_PRIMARY_GROUP': "dummy",
'HOME_DIRECTORY': "/home/%(uid)s",
'LOCKED_SHELL': "/usr/local/sbin/locked",
}, {
'DESCRIPTION': 'Keystone datastore',
'ENGINE': 'kgkeystone.datastore.keystone.MachineCategoryDataStore',
'VERSION': 'v3',
'ENDPOINT': 'http://localhost:35357/v3/',
'TOKEN': 'ADMIN',
'LEADER_ROLE': 'TenantManager',
'MEMBER_ROLE': 'Member',
'HOST': '127.0.0.1',
'PORT': '35357',
'PROTOCOL': 'http',
'PROJECT_NAME': 'admin',
'USERNAME': 'karaage',
'PASSWORD': 'test',
},
],
'dummy' : [
],
}
# OTHER
ACCOUNTS_ORG_NAME = 'TestOrg'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'karaage.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
LDAP = {
'default': {
'ENGINE': 'tldap.backend.fake_transactions',
'URI': 'ldap://localhost:38911/',
'USER': 'cn=Manager,dc=python-ldap,dc=org',
'PASSWORD': 'password',
'USE_TLS': False,
'TLS_CA' : None,
'LDAP_ACCOUNT_BASE': 'ou=People, dc=python-ldap,dc=org',
'LDAP_GROUP_BASE': 'ou=Group, dc=python-ldap,dc=org'
}
}
LDAP_TEST_DATASTORE = 'ldap'
LDAP_TEST_DATASTORE_N = 0
SERVER_EMAIL = 'django@' + uname()[1]
ACCOUNTS_EMAIL = '[email protected]'
APPROVE_ACCOUNTS_EMAIL = ACCOUNTS_EMAIL
EMAIL_SUBJECT_PREFIX = '[Grunt VPAC] - '
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
TIME_ZONE = 'Australia/Melbourne'
LANGUAGE_CODE = 'en-au'
GRAPH_ROOT = '/tmp/graphs/'
GRAPH_TMP = '/tmp/matplotlib/'
GRAPH_URL = '/media/graphs/'
INTERNAL_IPS = (
'127.0.0.1',
)
AUP_URL = 'http://example.com/aup.html'
ALLOW_REGISTRATIONS = True
REGISTRATION_BASE_URL = 'https://example.com/users'
SECRET_KEY = '5hvhpe6gv2t5x4$3dtq(w2v#vg@)sx4p3r_@wv%l41g!stslc*'
STATIC_URL = "/static/"
| gpl-3.0 |
jereze/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
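# Editorial note: logsumexp implementations typically rely on the shift identity
#   log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)),  with m = max_i x_i,
# so that exponentiating very negative (or very positive) inputs cannot underflow
# or overflow the intermediate sums.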
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method helps get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
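# Editorial note: a stable log_logistic is commonly evaluated piecewise,
#   log(1 / (1 + exp(-x))) = -log1p(exp(-x))   for x >= 0
#                          = x - log1p(exp(x)) for x < 0,
# which is why the extreme inputs [-100., 100.] above map to [-100, 0] without
# overflowing.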
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
        # Invalid or mismatched dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
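# Editorial note: the incremental update exercised above combines an old batch
# (n_a, mean_a, var_a) with a new batch (n_b, mean_b, var_b) along the lines of the
# Youngs-Cramer / Chan et al. formulas:
#   n     = n_a + n_b
#   delta = mean_b - mean_a
#   mean  = mean_a + delta * n_b / n
#   var   = (n_a * var_a + n_b * var_b + delta**2 * n_a * n_b / n) / n
# the exact bookkeeping lives in _batch_mean_variance_update.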
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
0asa/scikit-learn | sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
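# Editorial note: l1_min_c is documented to return the lowest C such that, for any
# larger C, an L1-penalized model is guaranteed to be non-empty; the check above
# verifies exactly that boundary behavior (all-zero coefficients and intercept at
# min_c, something non-zero just above it).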
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
gdhungana/desispec | py/desispec/scripts/qa_prod.py | 1 | 2639 | # Script for generating QA from a Production run
from __future__ import absolute_import, division
from desispec.qa import QA_Prod
from desispec.log import get_logger
import argparse
import numpy as np
def parse(options=None):
parser = argparse.ArgumentParser(description="Generate Production Level QA")
parser.add_argument('--specprod_dir', type = str, default = None, required=True,
help = 'Path containing the exposures/directory to use')
parser.add_argument('--make_frameqa', type = int, default = 0,
help = 'Bitwise flag to control remaking the QA files (1) and figures (2) for each frame in the production')
parser.add_argument('--slurp', default = False, action='store_true',
help = 'slurp production QA files into one?')
parser.add_argument('--remove', default = False, action='store_true',
help = 'remove frame QA files?')
parser.add_argument('--clobber', default = True, action='store_true',
help = 'clobber existing QA files?')
parser.add_argument('--channel_hist', type=str, default=None,
help='Generate channel histogram(s)')
args = None
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
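# Example invocation (hypothetical wrapper script name and paths), based on the
# arguments defined above:
#   <qa_prod_script> --specprod_dir /path/to/specprod --make_frameqa 3 --slurp --remove
# i.e. regenerate the per-frame QA files (bit 1) and figures (bit 2), slurp them into
# a single production QA file, and remove the per-frame files afterwards.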
def main(args) :
log=get_logger()
log.info("starting")
qa_prod = QA_Prod(args.specprod_dir)
# Remake Frame QA?
if args.make_frameqa > 0:
log.info("(re)generating QA related to frames")
if (args.make_frameqa % 4) >= 2:
make_frame_plots = True
else:
make_frame_plots = False
# Run
qa_prod.make_frameqa(make_plots=make_frame_plots, clobber=args.clobber)
# Slurp?
if args.slurp:
qa_prod.slurp(make=(args.make_frameqa > 0), remove=args.remove)
# Channel histograms
if args.channel_hist is not None:
# imports
from matplotlib.backends.backend_pdf import PdfPages
from desispec.qa import qa_plots as dqqp
#
qa_prod.load_data()
outfile = qa_prod.prod_name+'_chist.pdf'
pp = PdfPages(outfile)
# Default?
if args.channel_hist == 'default':
dqqp.prod_channel_hist(qa_prod, 'FIBERFLAT', 'MAX_RMS', pp=pp, close=False)
dqqp.prod_channel_hist(qa_prod, 'SKYSUB', 'MED_RESID', xlim=(-1,1), pp=pp, close=False)
dqqp.prod_channel_hist(qa_prod, 'FLUXCALIB', 'MAX_ZP_OFF', pp=pp, close=False)
# Finish
print("Writing {:s}".format(outfile))
pp.close()
| bsd-3-clause |
Denvi/FlatCAM | FlatCAM_GTK/FlatCAMApp.py | 2 | 90553 | ############################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# http://caram.cl/software/flatcam #
# Author: Juan Pablo Caram (c) #
# Date: 2/5/2014 #
# MIT Licence #
############################################################
import threading
import sys
import urllib
import random
from gi.repository import Gtk, GdkPixbuf, GObject, Gdk, GLib
# from shapely import speedups
# Importing shapely speedups was causing the following errors:
# 'C:\WinPython-32\python-2.7.6\Lib\site-packages\gnome\lib/gio/modules\
# libgiognutls.dll': The specified module could not be found.
# Failed to load module: C:\WinPython-32\python-2.7.6\Lib\site-packages\gnome\lib/gio/modules\libgiognutls.dll
# 'C:\WinPython-32\python-2.7.6\Lib\site-packages\gnome\lib/gio/modules\
# libgiolibproxy.dll': The specified module could not be found.
# Failed to load module: C:\WinPython-32\python-2.7.6\Lib\site-packages\gnome\lib/gio/modules\libgiolibproxy.dll
########################################
## Imports part of FlatCAM ##
########################################
from FlatCAM_GTK.FlatCAMWorker import Worker
from FlatCAM_GTK.ObjectCollection import *
from FlatCAM_GTK.FlatCAMObj import *
from FlatCAM_GTK.PlotCanvas import *
from FlatCAM_GTK.FlatCAMGUI import *
class GerberOptionsGroupUI(Gtk.VBox):
def __init__(self):
Gtk.VBox.__init__(self, spacing=3, margin=5, vexpand=False)
## Plot options
self.plot_options_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.plot_options_label.set_markup("<b>Plot Options:</b>")
self.pack_start(self.plot_options_label, expand=False, fill=True, padding=2)
grid0 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid0, expand=True, fill=False, padding=2)
# Plot CB
self.plot_cb = FCCheckBox(label='Plot')
grid0.attach(self.plot_cb, 0, 0, 1, 1)
# Solid CB
self.solid_cb = FCCheckBox(label='Solid')
grid0.attach(self.solid_cb, 1, 0, 1, 1)
# Multicolored CB
self.multicolored_cb = FCCheckBox(label='Multicolored')
grid0.attach(self.multicolored_cb, 2, 0, 1, 1)
## Isolation Routing
self.isolation_routing_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.isolation_routing_label.set_markup("<b>Isolation Routing:</b>")
self.pack_start(self.isolation_routing_label, expand=True, fill=False, padding=2)
grid = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid, expand=True, fill=False, padding=2)
l1 = Gtk.Label('Tool diam:', xalign=1)
grid.attach(l1, 0, 0, 1, 1)
self.iso_tool_dia_entry = LengthEntry()
grid.attach(self.iso_tool_dia_entry, 1, 0, 1, 1)
l2 = Gtk.Label('Width (# passes):', xalign=1)
grid.attach(l2, 0, 1, 1, 1)
self.iso_width_entry = IntEntry()
grid.attach(self.iso_width_entry, 1, 1, 1, 1)
l3 = Gtk.Label('Pass overlap:', xalign=1)
grid.attach(l3, 0, 2, 1, 1)
self.iso_overlap_entry = FloatEntry()
grid.attach(self.iso_overlap_entry, 1, 2, 1, 1)
## Board cuttout
self.isolation_routing_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.isolation_routing_label.set_markup("<b>Board cutout:</b>")
self.pack_start(self.isolation_routing_label, expand=True, fill=False, padding=2)
grid2 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid2, expand=True, fill=False, padding=2)
l4 = Gtk.Label('Tool dia:', xalign=1)
grid2.attach(l4, 0, 0, 1, 1)
self.cutout_tooldia_entry = LengthEntry()
grid2.attach(self.cutout_tooldia_entry, 1, 0, 1, 1)
l5 = Gtk.Label('Margin:', xalign=1)
grid2.attach(l5, 0, 1, 1, 1)
self.cutout_margin_entry = LengthEntry()
grid2.attach(self.cutout_margin_entry, 1, 1, 1, 1)
l6 = Gtk.Label('Gap size:', xalign=1)
grid2.attach(l6, 0, 2, 1, 1)
self.cutout_gap_entry = LengthEntry()
grid2.attach(self.cutout_gap_entry, 1, 2, 1, 1)
l7 = Gtk.Label('Gaps:', xalign=1)
grid2.attach(l7, 0, 3, 1, 1)
self.gaps_radio = RadioSet([{'label': '2 (T/B)', 'value': 'tb'},
{'label': '2 (L/R)', 'value': 'lr'},
{'label': '4', 'value': '4'}])
grid2.attach(self.gaps_radio, 1, 3, 1, 1)
## Non-copper regions
self.noncopper_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.noncopper_label.set_markup("<b>Non-copper regions:</b>")
self.pack_start(self.noncopper_label, expand=True, fill=False, padding=2)
grid3 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid3, expand=True, fill=False, padding=2)
l8 = Gtk.Label('Boundary margin:', xalign=1)
grid3.attach(l8, 0, 0, 1, 1)
self.noncopper_margin_entry = LengthEntry()
grid3.attach(self.noncopper_margin_entry, 1, 0, 1, 1)
self.noncopper_rounded_cb = FCCheckBox(label="Rounded corners")
grid3.attach(self.noncopper_rounded_cb, 0, 1, 2, 1)
## Bounding box
self.boundingbox_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.boundingbox_label.set_markup('<b>Bounding Box:</b>')
self.pack_start(self.boundingbox_label, expand=True, fill=False, padding=2)
grid4 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid4, expand=True, fill=False, padding=2)
l9 = Gtk.Label('Boundary Margin:', xalign=1)
grid4.attach(l9, 0, 0, 1, 1)
self.bbmargin_entry = LengthEntry()
grid4.attach(self.bbmargin_entry, 1, 0, 1, 1)
self.bbrounded_cb = FCCheckBox(label="Rounded corners")
grid4.attach(self.bbrounded_cb, 0, 1, 2, 1)
class ExcellonOptionsGroupUI(Gtk.VBox):
def __init__(self):
Gtk.VBox.__init__(self, spacing=3, margin=5, vexpand=False)
## Plot options
self.plot_options_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.plot_options_label.set_markup("<b>Plot Options:</b>")
self.pack_start(self.plot_options_label, expand=False, fill=True, padding=2)
grid0 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid0, expand=True, fill=False, padding=2)
self.plot_cb = FCCheckBox(label='Plot')
grid0.attach(self.plot_cb, 0, 0, 1, 1)
self.solid_cb = FCCheckBox(label='Solid')
grid0.attach(self.solid_cb, 1, 0, 1, 1)
## Create CNC Job
self.cncjob_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.cncjob_label.set_markup('<b>Create CNC Job</b>')
self.pack_start(self.cncjob_label, expand=True, fill=False, padding=2)
grid1 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid1, expand=True, fill=False, padding=2)
l1 = Gtk.Label('Cut Z:', xalign=1)
grid1.attach(l1, 0, 0, 1, 1)
self.cutz_entry = LengthEntry()
grid1.attach(self.cutz_entry, 1, 0, 1, 1)
l2 = Gtk.Label('Travel Z:', xalign=1)
grid1.attach(l2, 0, 1, 1, 1)
self.travelz_entry = LengthEntry()
grid1.attach(self.travelz_entry, 1, 1, 1, 1)
l3 = Gtk.Label('Feed rate:', xalign=1)
grid1.attach(l3, 0, 2, 1, 1)
self.feedrate_entry = LengthEntry()
grid1.attach(self.feedrate_entry, 1, 2, 1, 1)
class GeometryOptionsGroupUI(Gtk.VBox):
def __init__(self):
Gtk.VBox.__init__(self, spacing=3, margin=5, vexpand=False)
## Plot options
self.plot_options_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.plot_options_label.set_markup("<b>Plot Options:</b>")
self.pack_start(self.plot_options_label, expand=False, fill=True, padding=2)
grid0 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid0, expand=True, fill=False, padding=2)
# Plot CB
self.plot_cb = FCCheckBox(label='Plot')
grid0.attach(self.plot_cb, 0, 0, 1, 1)
## Create CNC Job
self.cncjob_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.cncjob_label.set_markup('<b>Create CNC Job:</b>')
self.pack_start(self.cncjob_label, expand=True, fill=False, padding=2)
grid1 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid1, expand=True, fill=False, padding=2)
# Cut Z
l1 = Gtk.Label('Cut Z:', xalign=1)
grid1.attach(l1, 0, 0, 1, 1)
self.cutz_entry = LengthEntry()
grid1.attach(self.cutz_entry, 1, 0, 1, 1)
# Travel Z
l2 = Gtk.Label('Travel Z:', xalign=1)
grid1.attach(l2, 0, 1, 1, 1)
self.travelz_entry = LengthEntry()
grid1.attach(self.travelz_entry, 1, 1, 1, 1)
l3 = Gtk.Label('Feed rate:', xalign=1)
grid1.attach(l3, 0, 2, 1, 1)
self.cncfeedrate_entry = LengthEntry()
grid1.attach(self.cncfeedrate_entry, 1, 2, 1, 1)
l4 = Gtk.Label('Tool dia:', xalign=1)
grid1.attach(l4, 0, 3, 1, 1)
self.cnctooldia_entry = LengthEntry()
grid1.attach(self.cnctooldia_entry, 1, 3, 1, 1)
## Paint Area
self.paint_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.paint_label.set_markup('<b>Paint Area:</b>')
self.pack_start(self.paint_label, expand=True, fill=False, padding=2)
grid2 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid2, expand=True, fill=False, padding=2)
# Tool dia
l5 = Gtk.Label('Tool dia:', xalign=1)
grid2.attach(l5, 0, 0, 1, 1)
self.painttooldia_entry = LengthEntry()
grid2.attach(self.painttooldia_entry, 1, 0, 1, 1)
# Overlap
l6 = Gtk.Label('Overlap:', xalign=1)
grid2.attach(l6, 0, 1, 1, 1)
self.paintoverlap_entry = LengthEntry()
grid2.attach(self.paintoverlap_entry, 1, 1, 1, 1)
# Margin
l7 = Gtk.Label('Margin:', xalign=1)
grid2.attach(l7, 0, 2, 1, 1)
self.paintmargin_entry = LengthEntry()
grid2.attach(self.paintmargin_entry, 1, 2, 1, 1)
class CNCJobOptionsGroupUI(Gtk.VBox):
def __init__(self):
Gtk.VBox.__init__(self, spacing=3, margin=5, vexpand=False)
## Plot options
self.plot_options_label = Gtk.Label(justify=Gtk.Justification.LEFT, xalign=0, margin_top=5)
self.plot_options_label.set_markup("<b>Plot Options:</b>")
self.pack_start(self.plot_options_label, expand=False, fill=True, padding=2)
grid0 = Gtk.Grid(column_spacing=3, row_spacing=2)
self.pack_start(grid0, expand=True, fill=False, padding=2)
# Plot CB
self.plot_cb = FCCheckBox(label='Plot')
grid0.attach(self.plot_cb, 0, 0, 2, 1)
# Tool dia for plot
l1 = Gtk.Label('Tool dia:', xalign=1)
grid0.attach(l1, 0, 1, 1, 1)
self.tooldia_entry = LengthEntry()
grid0.attach(self.tooldia_entry, 1, 1, 1, 1)
class GlobalOptionsUI(Gtk.VBox):
def __init__(self):
Gtk.VBox.__init__(self, spacing=3, margin=5, vexpand=False)
box1 = Gtk.Box()
self.pack_start(box1, expand=False, fill=False, padding=2)
l1 = Gtk.Label('Units:')
box1.pack_start(l1, expand=False, fill=False, padding=2)
self.units_radio = RadioSet([{'label': 'inch', 'value': 'IN'},
{'label': 'mm', 'value': 'MM'}])
box1.pack_start(self.units_radio, expand=False, fill=False, padding=2)
####### Gerber #######
l2 = Gtk.Label(margin=5)
l2.set_markup('<b>Gerber Options</b>')
frame1 = Gtk.Frame(label_widget=l2)
self.pack_start(frame1, expand=False, fill=False, padding=2)
self.gerber_group = GerberOptionsGroupUI()
frame1.add(self.gerber_group)
######## Excellon #########
l3 = Gtk.Label(margin=5)
l3.set_markup('<b>Excellon Options</b>')
frame2 = Gtk.Frame(label_widget=l3)
self.pack_start(frame2, expand=False, fill=False, padding=2)
self.excellon_group = ExcellonOptionsGroupUI()
frame2.add(self.excellon_group)
########## Geometry ##########
l4 = Gtk.Label(margin=5)
l4.set_markup('<b>Geometry Options</b>')
frame3 = Gtk.Frame(label_widget=l4)
self.pack_start(frame3, expand=False, fill=False, padding=2)
self.geometry_group = GeometryOptionsGroupUI()
frame3.add(self.geometry_group)
########## CNC ############
l5 = Gtk.Label(margin=5)
l5.set_markup('<b>CNC Job Options</b>')
frame4 = Gtk.Frame(label_widget=l5)
self.pack_start(frame4, expand=False, fill=False, padding=2)
self.cncjob_group = CNCJobOptionsGroupUI()
frame4.add(self.cncjob_group)
########################################
## App ##
########################################
class App:
"""
The main application class. The constructor starts the GUI.
"""
log = logging.getLogger('base')
log.setLevel(logging.DEBUG)
#log.setLevel(logging.WARNING)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
version_url = "http://caram.cl/flatcam/VERSION"
def __init__(self):
"""
Starts the application. Takes no parameters.
:return: app
:rtype: App
"""
App.log.info("FlatCAM Starting...")
# if speedups.available:
# App.log.info("Enabling geometry speedups...")
# speedups.enable()
# Needed to interact with the GUI from other threads.
App.log.debug("GObject.threads_init()...")
GObject.threads_init()
#### GUI ####
# Glade init
# App.log.debug("Building GUI from Glade file...")
# self.gladefile = "FlatCAM.ui"
# self.builder = Gtk.Builder()
# self.builder.add_from_file(self.gladefile)
#
# # References to UI widgets
# self.window = self.builder.get_object("window1")
# self.position_label = self.builder.get_object("label3")
# self.grid = self.builder.get_object("grid1")
# self.notebook = self.builder.get_object("notebook1")
# self.info_label = self.builder.get_object("label_status")
# self.progress_bar = self.builder.get_object("progressbar")
# self.progress_bar.set_show_text(True)
# self.units_label = self.builder.get_object("label_units")
# self.toolbar = self.builder.get_object("toolbar_main")
#
# # White (transparent) background on the "Options" tab.
# self.builder.get_object("vp_options").override_background_color(Gtk.StateType.NORMAL,
# Gdk.RGBA(1, 1, 1, 1))
# # Combo box to choose between project and application options.
# self.combo_options = self.builder.get_object("combo_options")
# self.combo_options.set_active(1)
self.ui = FlatCAMGUI()
#self.setup_project_list() # The "Project" tab
self.setup_component_editor() # The "Selected" tab
## Setup the toolbar. Adds buttons.
self.setup_toolbar()
# App.log.debug("Connecting signals from builder...")
#### Event handling ####
# self.builder.connect_signals(self)
self.ui.menufileopengerber.connect('activate', self.on_fileopengerber)
#### Make plot area ####
self.plotcanvas = PlotCanvas(self.ui.plotarea)
self.plotcanvas.mpl_connect('button_press_event', self.on_click_over_plot)
self.plotcanvas.mpl_connect('motion_notify_event', self.on_mouse_move_over_plot)
self.plotcanvas.mpl_connect('key_press_event', self.on_key_over_plot)
#### DATA ####
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.setup_obj_classes()
self.mouse = None # Mouse coordinates over plot
self.recent = []
self.collection = ObjectCollection()
# self.builder.get_object("box_project").pack_start(self.collection.view, False, False, 1)
self.ui.notebook.project_contents.pack_start(self.collection.view, False, False, 1)
# TODO: Do this different
self.collection.view.connect("row_activated", self.on_row_activated)
# Used to inhibit the on_options_update callback when
# the options are being changed by the program and not the user.
self.options_update_ignore = False
self.toggle_units_ignore = False
# self.options_box = self.builder.get_object('options_box')
## Application defaults ##
self.defaults_form = GlobalOptionsUI()
self.defaults_form_fields = {
"units": self.defaults_form.units_radio,
"gerber_plot": self.defaults_form.gerber_group.plot_cb,
"gerber_solid": self.defaults_form.gerber_group.solid_cb,
"gerber_multicolored": self.defaults_form.gerber_group.multicolored_cb,
"gerber_isotooldia": self.defaults_form.gerber_group.iso_tool_dia_entry,
"gerber_isopasses": self.defaults_form.gerber_group.iso_width_entry,
"gerber_isooverlap": self.defaults_form.gerber_group.iso_overlap_entry,
"gerber_cutouttooldia": self.defaults_form.gerber_group.cutout_tooldia_entry,
"gerber_cutoutmargin": self.defaults_form.gerber_group.cutout_margin_entry,
"gerber_cutoutgapsize": self.defaults_form.gerber_group.cutout_gap_entry,
"gerber_gaps": self.defaults_form.gerber_group.gaps_radio,
"gerber_noncoppermargin": self.defaults_form.gerber_group.noncopper_margin_entry,
"gerber_noncopperrounded": self.defaults_form.gerber_group.noncopper_rounded_cb,
"gerber_bboxmargin": self.defaults_form.gerber_group.bbmargin_entry,
"gerber_bboxrounded": self.defaults_form.gerber_group.bbrounded_cb,
"excellon_plot": self.defaults_form.excellon_group.plot_cb,
"excellon_solid": self.defaults_form.excellon_group.solid_cb,
"excellon_drillz": self.defaults_form.excellon_group.cutz_entry,
"excellon_travelz": self.defaults_form.excellon_group.travelz_entry,
"excellon_feedrate": self.defaults_form.excellon_group.feedrate_entry,
"geometry_plot": self.defaults_form.geometry_group.plot_cb,
"geometry_cutz": self.defaults_form.geometry_group.cutz_entry,
"geometry_travelz": self.defaults_form.geometry_group.travelz_entry,
"geometry_feedrate": self.defaults_form.geometry_group.cncfeedrate_entry,
"geometry_cnctooldia": self.defaults_form.geometry_group.cnctooldia_entry,
"geometry_painttooldia": self.defaults_form.geometry_group.painttooldia_entry,
"geometry_paintoverlap": self.defaults_form.geometry_group.paintoverlap_entry,
"geometry_paintmargin": self.defaults_form.geometry_group.paintmargin_entry,
"cncjob_plot": self.defaults_form.cncjob_group.plot_cb,
"cncjob_tooldia": self.defaults_form.cncjob_group.tooldia_entry
}
self.defaults = {
"units": "IN",
"gerber_plot": True,
"gerber_solid": True,
"gerber_multicolored": False,
"gerber_isotooldia": 0.016,
"gerber_isopasses": 1,
"gerber_isooverlap": 0.15,
"gerber_cutouttooldia": 0.07,
"gerber_cutoutmargin": 0.1,
"gerber_cutoutgapsize": 0.15,
"gerber_gaps": "4",
"gerber_noncoppermargin": 0.0,
"gerber_noncopperrounded": False,
"gerber_bboxmargin": 0.0,
"gerber_bboxrounded": False,
"excellon_plot": True,
"excellon_solid": False,
"excellon_drillz": -0.1,
"excellon_travelz": 0.1,
"excellon_feedrate": 3.0,
"geometry_plot": True,
"geometry_cutz": -0.002,
"geometry_travelz": 0.1,
"geometry_feedrate": 3.0,
"geometry_cnctooldia": 0.016,
"geometry_painttooldia": 0.07,
"geometry_paintoverlap": 0.15,
"geometry_paintmargin": 0.0,
"cncjob_plot": True,
"cncjob_tooldia": 0.016
}
self.load_defaults()
self.defaults_write_form()
## Current Project ##
self.options_form = GlobalOptionsUI()
self.options_form_fields = {
"units": self.options_form.units_radio,
"gerber_plot": self.options_form.gerber_group.plot_cb,
"gerber_solid": self.options_form.gerber_group.solid_cb,
"gerber_multicolored": self.options_form.gerber_group.multicolored_cb,
"gerber_isotooldia": self.options_form.gerber_group.iso_tool_dia_entry,
"gerber_isopasses": self.options_form.gerber_group.iso_width_entry,
"gerber_isooverlap": self.options_form.gerber_group.iso_overlap_entry,
"gerber_cutouttooldia": self.options_form.gerber_group.cutout_tooldia_entry,
"gerber_cutoutmargin": self.options_form.gerber_group.cutout_margin_entry,
"gerber_cutoutgapsize": self.options_form.gerber_group.cutout_gap_entry,
"gerber_gaps": self.options_form.gerber_group.gaps_radio,
"gerber_noncoppermargin": self.options_form.gerber_group.noncopper_margin_entry,
"gerber_noncopperrounded": self.options_form.gerber_group.noncopper_rounded_cb,
"gerber_bboxmargin": self.options_form.gerber_group.bbmargin_entry,
"gerber_bboxrounded": self.options_form.gerber_group.bbrounded_cb,
"excellon_plot": self.options_form.excellon_group.plot_cb,
"excellon_solid": self.options_form.excellon_group.solid_cb,
"excellon_drillz": self.options_form.excellon_group.cutz_entry,
"excellon_travelz": self.options_form.excellon_group.travelz_entry,
"excellon_feedrate": self.options_form.excellon_group.feedrate_entry,
"geometry_plot": self.options_form.geometry_group.plot_cb,
"geometry_cutz": self.options_form.geometry_group.cutz_entry,
"geometry_travelz": self.options_form.geometry_group.travelz_entry,
"geometry_feedrate": self.options_form.geometry_group.cncfeedrate_entry,
"geometry_cnctooldia": self.options_form.geometry_group.cnctooldia_entry,
"geometry_painttooldia": self.options_form.geometry_group.painttooldia_entry,
"geometry_paintoverlap": self.options_form.geometry_group.paintoverlap_entry,
"geometry_paintmargin": self.options_form.geometry_group.paintmargin_entry,
"cncjob_plot": self.options_form.cncjob_group.plot_cb,
"cncjob_tooldia": self.options_form.cncjob_group.tooldia_entry
}
# Project options
self.options = {
"units": "IN",
"gerber_plot": True,
"gerber_solid": True,
"gerber_multicolored": False,
"gerber_isotooldia": 0.016,
"gerber_isopasses": 1,
"gerber_isooverlap": 0.15,
"gerber_cutouttooldia": 0.07,
"gerber_cutoutmargin": 0.1,
"gerber_cutoutgapsize": 0.15,
"gerber_gaps": "4",
"gerber_noncoppermargin": 0.0,
"gerber_noncopperrounded": False,
"gerber_bboxmargin": 0.0,
"gerber_bboxrounded": False,
"excellon_plot": True,
"excellon_solid": False,
"excellon_drillz": -0.1,
"excellon_travelz": 0.1,
"excellon_feedrate": 3.0,
"geometry_plot": True,
"geometry_cutz": -0.002,
"geometry_travelz": 0.1,
"geometry_feedrate": 3.0,
"geometry_cnctooldia": 0.016,
"geometry_painttooldia": 0.07,
"geometry_paintoverlap": 0.15,
"geometry_paintmargin": 0.0,
"cncjob_plot": True,
"cncjob_tooldia": 0.016
}
self.options.update(self.defaults) # Copy app defaults to project options
self.options_write_form()
self.project_filename = None
# Where we draw the options/defaults forms.
self.on_options_combo_change(None)
#self.options_box.pack_start(self.defaults_form, False, False, 1)
self.options_form.units_radio.group_toggle_fn = lambda x, y: self.on_toggle_units(x)
## Event subscriptions ##
## Tools ##
# self.measure = Measurement(self.builder.get_object("box39"), self.plotcanvas)
self.measure = Measurement(self.ui.plotarea_super, self.plotcanvas)
# Toolbar icon
# TODO: Where should I put this? Tool should have a method to add to toolbar?
meas_ico = Gtk.Image.new_from_file('share/measure32.png')
measure = Gtk.ToolButton.new(meas_ico, "")
measure.connect("clicked", self.measure.toggle_active)
measure.set_tooltip_markup("<b>Measure Tool:</b> Enable/disable tool.\n" +
"Click on point to set reference.\n" +
"(Click on plot and hit <b>m</b>)")
# self.toolbar.insert(measure, -1)
self.ui.toolbar.insert(measure, -1)
#### Initialization ####
# self.units_label.set_text("[" + self.options["units"] + "]")
self.ui.units_label.set_text("[" + self.options["units"] + "]")
self.setup_recent_items()
App.log.info("Starting Worker...")
self.worker = Worker()
self.worker.daemon = True
self.worker.start()
#### Check for updates ####
# Separate thread (Not worker)
self.version = 5
App.log.info("Checking for updates in background (this is version %s)." % str(self.version))
t1 = threading.Thread(target=self.version_check)
t1.daemon = True
t1.start()
#### For debugging only ###
def somethreadfunc(app_obj):
App.log.info("Hello World!")
t = threading.Thread(target=somethreadfunc, args=(self,))
t.daemon = True
t.start()
########################################
## START ##
########################################
self.icon256 = GdkPixbuf.Pixbuf.new_from_file('share/flatcam_icon256.png')
self.icon48 = GdkPixbuf.Pixbuf.new_from_file('share/flatcam_icon48.png')
self.icon16 = GdkPixbuf.Pixbuf.new_from_file('share/flatcam_icon16.png')
Gtk.Window.set_default_icon_list([self.icon16, self.icon48, self.icon256])
# self.window.set_title("FlatCAM - Alpha 5")
# self.window.set_default_size(900, 600)
# self.window.show_all()
self.ui.show_all()
App.log.info("END of constructor. Releasing control.")
def message_dialog(self, title, message, kind="info"):
types = {"info": Gtk.MessageType.INFO,
"warn": Gtk.MessageType.WARNING,
"error": Gtk.MessageType.ERROR}
dlg = Gtk.MessageDialog(self.ui, 0, types[kind], Gtk.ButtonsType.OK, title)
dlg.format_secondary_text(message)
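# Run the dialog from the GLib main loop so this method is safe to call from worker threads.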
def lifecycle():
dlg.run()
dlg.destroy()
GLib.idle_add(lifecycle)
def question_dialog(self, title, message):
label = Gtk.Label(message)
dialog = Gtk.Dialog(title, self.window, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.set_default_size(150, 100)
dialog.set_modal(True)
box = dialog.get_content_area()
box.set_border_width(10)
box.add(label)
dialog.show_all()
response = dialog.run()
dialog.destroy()
return response
def setup_toolbar(self):
# Zoom fit
# zf_ico = Gtk.Image.new_from_file('share/zoom_fit32.png')
# zoom_fit = Gtk.ToolButton.new(zf_ico, "")
# zoom_fit.connect("clicked", self.on_zoom_fit)
# zoom_fit.set_tooltip_markup("Zoom Fit.\n(Click on plot and hit <b>1</b>)")
# self.toolbar.insert(zoom_fit, -1)
self.ui.zoom_fit_btn.connect("clicked", self.on_zoom_fit)
# Zoom out
# zo_ico = Gtk.Image.new_from_file('share/zoom_out32.png')
# zoom_out = Gtk.ToolButton.new(zo_ico, "")
# zoom_out.connect("clicked", self.on_zoom_out)
# zoom_out.set_tooltip_markup("Zoom Out.\n(Click on plot and hit <b>2</b>)")
# self.toolbar.insert(zoom_out, -1)
self.ui.zoom_out_btn.connect("clicked", self.on_zoom_out)
# Zoom in
# zi_ico = Gtk.Image.new_from_file('share/zoom_in32.png')
# zoom_in = Gtk.ToolButton.new(zi_ico, "")
# zoom_in.connect("clicked", self.on_zoom_in)
# zoom_in.set_tooltip_markup("Zoom In.\n(Click on plot and hit <b>3</b>)")
# self.toolbar.insert(zoom_in, -1)
self.ui.zoom_in_btn.connect("clicked", self.on_zoom_in)
# Clear plot
# cp_ico = Gtk.Image.new_from_file('share/clear_plot32.png')
# clear_plot = Gtk.ToolButton.new(cp_ico, "")
# clear_plot.connect("clicked", self.on_clear_plots)
# clear_plot.set_tooltip_markup("Clear Plot")
# self.toolbar.insert(clear_plot, -1)
self.ui.clear_plot_btn.connect("clicked", self.on_clear_plots)
# Replot
# rp_ico = Gtk.Image.new_from_file('share/replot32.png')
# replot = Gtk.ToolButton.new(rp_ico, "")
# replot.connect("clicked", self.on_toolbar_replot)
# replot.set_tooltip_markup("Re-plot all")
# self.toolbar.insert(replot, -1)
self.ui.replot_btn.connect("clicked", self.on_toolbar_replot)
# Delete item
# del_ico = Gtk.Image.new_from_file('share/delete32.png')
# delete = Gtk.ToolButton.new(del_ico, "")
# delete.connect("clicked", self.on_delete)
# delete.set_tooltip_markup("Delete selected\nobject.")
# self.toolbar.insert(delete, -1)
self.ui.delete_btn.connect("clicked", self.on_delete)
def setup_obj_classes(self):
"""
Sets up application specifics on the FlatCAMObj class.
:return: None
"""
FlatCAMObj.app = self
def setup_component_editor(self):
"""
Initial configuration of the component editor. Creates
a page titled "Selection" on the notebook on the left
side of the main window.
:return: None
"""
# box_selected = self.builder.get_object("vp_selected")
# White background
# box_selected.override_background_color(Gtk.StateType.NORMAL,
# Gdk.RGBA(1, 1, 1, 1))
self.ui.notebook.selected_contents.override_background_color(Gtk.StateType.NORMAL,
Gdk.RGBA(1, 1, 1, 1))
# Remove anything else in the box
box_children = self.ui.notebook.selected_contents.get_children()
for child in box_children:
self.ui.notebook.selected_contents.remove(child)
box1 = Gtk.Box(Gtk.Orientation.VERTICAL)
label1 = Gtk.Label("Choose an item from Project")
box1.pack_start(label1, True, False, 1)
self.ui.notebook.selected_contents.add(box1)
box1.show()
label1.show()
def setup_recent_items(self):
# TODO: Move this to constructor
icons = {
"gerber": "share/flatcam_icon16.png",
"excellon": "share/drill16.png",
"cncjob": "share/cnc16.png",
"project": "share/project16.png"
}
openers = {
'gerber': self.open_gerber,
'excellon': self.open_excellon,
'cncjob': self.open_gcode,
'project': self.open_project
}
# Closure needed to create callbacks in a loop.
# Otherwise late binding occurs.
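# (Without it, every recent-file menu item would end up opening the last file in the loop.)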
def make_callback(func, fname):
def opener(*args):
self.worker.add_task(func, [fname])
return opener
try:
f = open('recent.json')
except IOError:
App.log.error("Failed to load recent item list.")
self.info("ERROR: Failed to load recent item list.")
return
try:
self.recent = json.load(f)
except:
App.log.error("Failed to parse recent item list.")
self.info("ERROR: Failed to parse recent item list.")
f.close()
return
f.close()
recent_menu = Gtk.Menu()
for recent in self.recent:
filename = recent['filename'].split('/')[-1].split('\\')[-1]
item = Gtk.ImageMenuItem.new_with_label(filename)
im = Gtk.Image.new_from_file(icons[recent["kind"]])
item.set_image(im)
o = make_callback(openers[recent["kind"]], recent['filename'])
item.connect('activate', o)
recent_menu.append(item)
# self.builder.get_object('open_recent').set_submenu(recent_menu)
self.ui.menufilerecent.set_submenu(recent_menu)
recent_menu.show_all()
def info(self, text):
"""
Show text on the status bar. This method is thread safe.
:param text: Text to display.
:type text: str
:return: None
"""
GLib.idle_add(lambda: self.ui.info_label.set_text(text))
def get_radio_value(self, radio_set):
"""
Returns radio_set[key] for the radio button whose
name is key and which is currently active.
:param radio_set: A dictionary containing widget_name: value pairs.
:type radio_set: dict
:return: radio_set[key]
"""
for name in radio_set:
if self.builder.get_object(name).get_active():
return radio_set[name]
def plot_all(self):
"""
Re-generates all plots from all objects.
:return: None
"""
self.plotcanvas.clear()
self.set_progress_bar(0.1, "Re-plotting...")
def worker_task(app_obj):
percentage = 0.1
try:
delta = 0.9 / len(self.collection.get_list())
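# The remaining 90% of the progress bar is split evenly among the objects.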
except ZeroDivisionError:
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, ""))
return
for obj in self.collection.get_list():
obj.plot()
percentage += delta
GLib.idle_add(lambda: app_obj.set_progress_bar(percentage, "Re-plotting..."))
GLib.idle_add(app_obj.plotcanvas.auto_adjust_axes)
GLib.idle_add(lambda: self.on_zoom_fit(None))
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, "Idle"))
# Send to worker
self.worker.add_task(worker_task, [self])
def get_eval(self, widget_name):
"""
Runs eval() on the text entry of name 'widget_name'
and returns the results.
:param widget_name: Name of Gtk.Entry
:type widget_name: str
:return: Depends on contents of the entry text.
"""
value = self.builder.get_object(widget_name).get_text()
if value == "":
value = "None"
try:
evald = eval(value)
return evald
except:
self.info("Could not evaluate: " + value)
return None
def new_object(self, kind, name, initialize, active=True, fit=True, plot=True):
"""
Creates a new specialized FlatCAMObj and attaches it to the application,
that is, updates the GUI and any other records accordingly, and plots it.
This method is thread-safe.
:param kind: The kind of object to create. One of 'gerber',
'excellon', 'cncjob' and 'geometry'.
:type kind: str
:param name: Name for the object.
:type name: str
:param initialize: Function to run after creation of the object
but before it is attached to the application. The function is
called with 2 parameters: the new object and the App instance.
:type initialize: function
:return: None
:rtype: None
"""
App.log.debug("new_object()")
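# Illustrative usage (hypothetical names): the initialize callback receives
# the new object and this App instance, e.g.
#   def init_geo(geo_obj, app_obj):
#       geo_obj.solid_geometry = some_shapely_geometry
#   self.new_object("geometry", "cutout", init_geo)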
# This is ok here... NO.
# t = Gtk.TextView()
# print t
### Check for existing name
if name in self.collection.get_names():
## Create a new name
# Ends with number?
App.log.debug("new_object(): Object name exists, changing.")
match = re.search(r'(.*[^\d])?(\d+)$', name)
if match: # Yes: Increment the number!
base = match.group(1) or ''
num = int(match.group(2))
name = base + str(num + 1)
else: # No: add a number!
name += "_1"
# App dies here!
# t = Gtk.TextView()
# print t
# Create object
classdict = {
"gerber": FlatCAMGerber,
"excellon": FlatCAMExcellon,
"cncjob": FlatCAMCNCjob,
"geometry": FlatCAMGeometry
}
obj = classdict[kind](name)
obj.units = self.options["units"] # TODO: The constructor should look at defaults.
# Set default options from self.options
for option in self.options:
if option.find(kind + "_") == 0:
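# e.g. "gerber_isotooldia" -> "isotooldia"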
oname = option[len(kind)+1:]
obj.options[oname] = self.options[option]
# Initialize as per user request
# User must take care to implement initialize
# in a thread-safe way as it is likely that we
# have been invoked in a separate thread.
initialize(obj, self)
# Check units and convert if necessary
if self.options["units"].upper() != obj.units.upper():
GLib.idle_add(lambda: self.info("Converting units to " + self.options["units"] + "."))
obj.convert_units(self.options["units"])
# Add to our records
self.collection.append(obj, active=active)
# Show object details now.
# GLib.idle_add(lambda: self.notebook.set_current_page(1))
GLib.idle_add(lambda: self.ui.notebook.set_current_page(1))
# Plot
# TODO: (Thread-safe?)
if plot:
obj.plot()
if fit:
GLib.idle_add(lambda: self.on_zoom_fit(None))
return obj
def set_progress_bar(self, percentage, text=""):
"""
Sets the application's progress bar to a given fraction and text.
:param percentage: The fraction (0.0-1.0) of the progress.
:type percentage: float
:param text: Text to display on the progress bar.
:type text: str
:return: None
"""
# self.progress_bar.set_text(text)
# self.progress_bar.set_fraction(percentage)
self.ui.progress_bar.set_text(text)
self.ui.progress_bar.set_fraction(percentage)
return False
def load_defaults(self):
"""
Loads the application's default settings from defaults.json into
``self.defaults``.
:return: None
"""
try:
f = open("defaults.json")
options = f.read()
f.close()
except IOError:
App.log.error("Could not load defaults file.")
self.info("ERROR: Could not load defaults file.")
return
try:
defaults = json.loads(options)
except:
e = sys.exc_info()[0]
App.log.error(str(e))
self.info("ERROR: Failed to parse defaults file.")
return
self.defaults.update(defaults)
def defaults_read_form(self):
for option in self.defaults_form_fields:
self.defaults[option] = self.defaults_form_fields[option].get_value()
def options_read_form(self):
for option in self.options_form_fields:
self.options[option] = self.options_form_fields[option].get_value()
def defaults_write_form(self):
for option in self.defaults_form_fields:
self.defaults_form_fields[option].set_value(self.defaults[option])
def options_write_form(self):
for option in self.options_form_fields:
self.options_form_fields[option].set_value(self.options[option])
def save_project(self, filename):
"""
Saves the current project to the specified file.
:param filename: Name of the file in which to save.
:type filename: str
:return: None
"""
# Capture the latest changes
try:
self.collection.get_active().read_form()
except:
pass
# Serialize the whole project
d = {"objs": [obj.to_dict() for obj in self.collection.get_list()],
"options": self.options}
try:
f = open(filename, 'w')
except IOError:
App.log.error("ERROR: Failed to open file for saving: " + filename)
return
try:
json.dump(d, f, default=to_dict)
except:
App.log.error("ERROR: File open but failed to write: " + filename)
f.close()
return
f.close()
def open_project(self, filename):
"""
Loads a project from the specified file.
:param filename: Name of the file from which to load.
:type filename: str
:return: None
"""
App.log.debug("Opening project: " + filename)
try:
f = open(filename, 'r')
except IOError:
App.log.error("Failed to open project file: %s" % filename)
self.info("ERROR: Failed to open project file: %s" % filename)
return
try:
d = json.load(f, object_hook=dict2obj)
except:
App.log.error("Failed to parse project file: %s" % filename)
self.info("ERROR: Failed to parse project file: %s" % filename)
f.close()
return
self.register_recent("project", filename)
# Clear the current project
self.on_file_new(None)
# Project options
self.options.update(d['options'])
self.project_filename = filename
GLib.idle_add(lambda: self.units_label.set_text(self.options["units"]))
# Re create objects
App.log.debug("Re-creating objects...")
for obj in d['objs']:
def obj_init(obj_inst, app_inst):
obj_inst.from_dict(obj)
App.log.debug(obj['kind'] + ": " + obj['options']['name'])
self.new_object(obj['kind'], obj['options']['name'], obj_init, active=False, fit=False, plot=False)
self.plot_all()
self.info("Project loaded from: " + filename)
App.log.debug("Project loaded")
def populate_objects_combo(self, combo):
"""
Populates a Gtk.ComboBoxText with the list of the objects in the project.
:param combo: Name or instance of the comboboxtext.
:type combo: str or Gtk.ComboBoxText
:return: None
"""
App.log.debug("Populating combo!")
if type(combo) == str:
combo = self.builder.get_object(combo)
combo.remove_all()
for name in self.collection.get_names():
combo.append_text(name)
def version_check(self, *args):
"""
Checks for the latest version of the program. Alerts the
user if theirs is outdated. This method is meant to be run
in a separate thread.
:return: None
"""
try:
f = urllib.urlopen(App.version_url)
except:
App.log.warning("Failed checking for latest version. Could not connect.")
GLib.idle_add(lambda: self.info("ERROR trying to check for latest version."))
return
try:
data = json.load(f)
except:
App.log.error("Could not parse information about the latest version.")
GLib.idle_add(lambda: self.info("ERROR trying to check for latest version."))
f.close()
return
f.close()
if self.version >= data["version"]:
GLib.idle_add(lambda: self.info("FlatCAM is up to date!"))
return
label = Gtk.Label("There is a newer version of FlatCAM\n" +
"available for download:\n\n" +
data["name"] + "\n\n" + data["message"])
dialog = Gtk.Dialog("Newer Version Available", self.window, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.set_default_size(150, 100)
dialog.set_modal(True)
box = dialog.get_content_area()
box.set_border_width(10)
box.add(label)
def do_dialog():
dialog.show_all()
response = dialog.run()
dialog.destroy()
GLib.idle_add(lambda: do_dialog())
return
def do_nothing(self, param):
return
def disable_plots(self, except_current=False):
"""
Disables all plots with exception of the current object if specified.
:param except_current: Whether to skip the current object.
:type except_current: bool
:return: None
"""
# TODO: This method is very similar to replot_all. Try to merge.
self.set_progress_bar(0.1, "Re-plotting...")
def worker_task(app_obj):
percentage = 0.1
try:
delta = 0.9 / len(self.collection.get_list())
except ZeroDivisionError:
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, ""))
return
for obj in self.collection.get_list():
if obj != self.collection.get_active() or not except_current:
obj.options['plot'] = False
obj.plot()
percentage += delta
GLib.idle_add(lambda: app_obj.set_progress_bar(percentage, "Re-plotting..."))
GLib.idle_add(app_obj.plotcanvas.auto_adjust_axes)
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, ""))
# Send to worker
self.worker.add_task(worker_task, [self])
def enable_all_plots(self, *args):
self.plotcanvas.clear()
self.set_progress_bar(0.1, "Re-plotting...")
def worker_task(app_obj):
percentage = 0.1
try:
delta = 0.9 / len(self.collection.get_list())
except ZeroDivisionError:
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, ""))
return
for obj in self.collection.get_list():
obj.options['plot'] = True
obj.plot()
percentage += delta
GLib.idle_add(lambda: app_obj.set_progress_bar(percentage, "Re-plotting..."))
GLib.idle_add(app_obj.plotcanvas.auto_adjust_axes)
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, ""))
# Send to worker
self.worker.add_task(worker_task, [self])
def register_recent(self, kind, filename):
record = {'kind': kind, 'filename': filename}
if record in self.recent:
return
self.recent.insert(0, record)
if len(self.recent) > 10: # Limit reached
self.recent.pop()
try:
f = open('recent.json', 'w')
except IOError:
App.log.error("Failed to open recent items file for writing.")
self.info('ERROR: Failed to open recent items file for writing.')
return
try:
json.dump(self.recent, f)
except:
App.log.error("Failed to write to recent items file.")
self.info('ERROR: Failed to write to recent items file.')
f.close()
f.close()
def open_gerber(self, filename):
"""
Opens a Gerber file, parses it and creates a new object for
it in the program. Thread-safe.
:param filename: Gerber file filename
:type filename: str
:return: None
"""
# Fails here
# t = Gtk.TextView()
# print t
GLib.idle_add(lambda: self.set_progress_bar(0.1, "Opening Gerber ..."))
# How the object should be initialized
def obj_init(gerber_obj, app_obj):
assert isinstance(gerber_obj, FlatCAMGerber)
# Opening the file happens here
GLib.idle_add(lambda: app_obj.set_progress_bar(0.2, "Parsing ..."))
gerber_obj.parse_file(filename)
# Further parsing
GLib.idle_add(lambda: app_obj.set_progress_bar(0.5, "Creating Geometry ..."))
GLib.idle_add(lambda: app_obj.set_progress_bar(0.6, "Plotting ..."))
# Object name
name = filename.split('/')[-1].split('\\')[-1]
self.new_object("gerber", name, obj_init)
# New object creation and file processing
# try:
# self.new_object("gerber", name, obj_init)
# except:
# e = sys.exc_info()
# print "ERROR:", e[0]
# traceback.print_exc()
# self.message_dialog("Failed to create Gerber Object",
# "Attempting to create a FlatCAM Gerber Object from " +
# "Gerber file failed during processing:\n" +
# str(e[0]) + " " + str(e[1]), kind="error")
# GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, "Idle"))
# self.collection.delete_active()
# return
# Register recent file
self.register_recent("gerber", filename)
# GUI feedback
self.info("Opened: " + filename)
GLib.idle_add(lambda: self.set_progress_bar(1.0, "Done!"))
GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, "Idle"))
def open_excellon(self, filename):
"""
Opens an Excellon file, parses it and creates a new object for
it in the program. Thread-safe.
:param filename: Excellon file filename
:type filename: str
:return: None
"""
GLib.idle_add(lambda: self.set_progress_bar(0.1, "Opening Excellon ..."))
# How the object should be initialized
def obj_init(excellon_obj, app_obj):
GLib.idle_add(lambda: app_obj.set_progress_bar(0.2, "Parsing ..."))
excellon_obj.parse_file(filename)
excellon_obj.create_geometry()
GLib.idle_add(lambda: app_obj.set_progress_bar(0.6, "Plotting ..."))
# Object name
name = filename.split('/')[-1].split('\\')[-1]
# New object creation and file processing
try:
self.new_object("excellon", name, obj_init)
except:
e = sys.exc_info()
App.log.error(str(e))
self.message_dialog("Failed to create Excellon Object",
"Attempting to create a FlatCAM Excellon Object from " +
"Excellon file failed during processing:\n" +
str(e[0]) + " " + str(e[1]), kind="error")
GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, "Idle"))
self.collection.delete_active()
return
# Register recent file
self.register_recent("excellon", filename)
# GUI feedback
self.info("Opened: " + filename)
GLib.idle_add(lambda: self.set_progress_bar(1.0, "Done!"))
GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, ""))
def open_gcode(self, filename):
"""
Opens a G-code file, parses it and creates a new object for
it in the program. Thread-safe.
:param filename: G-code file filename
:type filename: str
:return: None
"""
# How the object should be initialized
def obj_init(job_obj, app_obj_):
"""
:type app_obj_: App
"""
assert isinstance(app_obj_, App)
GLib.idle_add(lambda: app_obj_.set_progress_bar(0.1, "Opening G-Code ..."))
f = open(filename)
gcode = f.read()
f.close()
job_obj.gcode = gcode
GLib.idle_add(lambda: app_obj_.set_progress_bar(0.2, "Parsing ..."))
job_obj.gcode_parse()
GLib.idle_add(lambda: app_obj_.set_progress_bar(0.6, "Creating geometry ..."))
job_obj.create_geometry()
GLib.idle_add(lambda: app_obj_.set_progress_bar(0.6, "Plotting ..."))
# Object name
name = filename.split('/')[-1].split('\\')[-1]
# New object creation and file processing
try:
self.new_object("cncjob", name, obj_init)
except:
e = sys.exc_info()
App.log.error(str(e))
self.message_dialog("Failed to create CNCJob Object",
"Attempting to create a FlatCAM CNCJob Object from " +
"G-Code file failed during processing:\n" +
str(e[0]) + " " + str(e[1]), kind="error")
GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, "Idle"))
self.collection.delete_active()
return
# Register recent file
self.register_recent("cncjob", filename)
# GUI feedback
self.info("Opened: " + filename)
GLib.idle_add(lambda: self.set_progress_bar(1.0, "Done!"))
GLib.timeout_add_seconds(1, lambda: self.set_progress_bar(0.0, ""))
########################################
## EVENT HANDLERS ##
########################################
def on_debug_printlist(self, *args):
self.collection.print_list()
def on_disable_all_plots(self, widget):
self.disable_plots()
def on_disable_all_plots_not_current(self, widget):
self.disable_plots(except_current=True)
def on_about(self, widget):
"""
Opens the 'About' dialog box.
:param widget: Ignored.
:return: None
"""
about = self.builder.get_object("aboutdialog")
about.run()
about.hide()
def on_create_mirror(self, widget):
"""
Creates a mirror image of an object to be used as a bottom layer.
:param widget: Ignored.
:return: None
"""
# TODO: Move (some of) this to camlib!
# Object to mirror
obj_name = self.builder.get_object("comboboxtext_bottomlayer").get_active_text()
fcobj = self.collection.get_by_name(obj_name)
# For now, lets limit to Gerbers and Excellons.
# assert isinstance(gerb, FlatCAMGerber)
if not isinstance(fcobj, FlatCAMGerber) and not isinstance(fcobj, FlatCAMExcellon):
self.info("ERROR: Only Gerber and Excellon objects can be mirrored.")
return
# Mirror axis "X" or "Y"
axis = self.get_radio_value({"rb_mirror_x": "X",
"rb_mirror_y": "Y"})
mode = self.get_radio_value({"rb_mirror_box": "box",
"rb_mirror_point": "point"})
if mode == "point": # A single point defines the mirror axis
# TODO: Error handling
px, py = eval(self.point_entry.get_text())
else: # The axis is the line dividing the box in the middle
name = self.box_combo.get_active_text()
bb_obj = self.collection.get_by_name(name)
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5*(xmin+xmax)
py = 0.5*(ymin+ymax)
fcobj.mirror(axis, [px, py])
fcobj.plot()
def on_create_aligndrill(self, widget):
"""
Creates alignment holes Excellon object. Creates mirror duplicates
of the specified holes around the specified axis.
:param widget: Ignored.
:return: None
"""
# Mirror axis. Same as in on_create_mirror.
axis = self.get_radio_value({"rb_mirror_x": "X",
"rb_mirror_y": "Y"})
# TODO: Error handling
mode = self.get_radio_value({"rb_mirror_box": "box",
"rb_mirror_point": "point"})
if mode == "point":
px, py = eval(self.point_entry.get_text())
else:
name = self.box_combo.get_active_text()
bb_obj = self.collection.get_by_name(name)
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5*(xmin+xmax)
py = 0.5*(ymin+ymax)
xscale, yscale = {"X": (1.0, -1.0), "Y": (-1.0, 1.0)}[axis]
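# Mirroring about the X axis negates Y coordinates; about the Y axis, X coordinates.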
# Tools
dia = self.get_eval("entry_dblsided_alignholediam")
tools = {"1": {"C": dia}}
# Parse hole list
# TODO: Better parsing
holes = self.builder.get_object("entry_dblsided_alignholes").get_text()
holes = eval("[" + holes + "]")
drills = []
for hole in holes:
point = Point(hole)
point_mirror = affinity.scale(point, xscale, yscale, origin=(px, py))
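# Scaling by -1 about (px, py) reflects the hole across the chosen mirror axis.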
drills.append({"point": point, "tool": "1"})
drills.append({"point": point_mirror, "tool": "1"})
def obj_init(obj_inst, app_inst):
obj_inst.tools = tools
obj_inst.drills = drills
obj_inst.create_geometry()
self.new_object("excellon", "Alignment Drills", obj_init)
def on_toggle_pointbox(self, widget):
"""
Callback for radio selection change between point and box in the
Double-sided PCB tool. Updates the UI accordingly.
:param widget: Ignored.
:return: None
"""
# Where the entry or combo go
box = self.builder.get_object("box_pointbox")
# Clear contents
children = box.get_children()
for child in children:
box.remove(child)
choice = self.get_radio_value({"rb_mirror_point": "point",
"rb_mirror_box": "box"})
if choice == "point":
self.point_entry = Gtk.Entry()
self.builder.get_object("box_pointbox").pack_start(self.point_entry,
False, False, 1)
self.point_entry.show()
else:
self.box_combo = Gtk.ComboBoxText()
self.builder.get_object("box_pointbox").pack_start(self.box_combo,
False, False, 1)
self.populate_objects_combo(self.box_combo)
self.box_combo.show()
def on_tools_doublesided(self, param):
"""
Callback for menu item Tools->Double Sided PCB Tool. Launches the
tool placing its UI in the "Tool" tab in the notebook.
:param param: Ignored.
:return: None
"""
# Where are we drawing the UI
box_tool = self.builder.get_object("box_tool")
# Remove anything else in the box
box_children = box_tool.get_children()
for child in box_children:
box_tool.remove(child)
# Get the UI
osw = self.builder.get_object("offscreenwindow_dblsided")
sw = self.builder.get_object("sw_dblsided")
osw.remove(sw)
vp = self.builder.get_object("vp_dblsided")
vp.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(1, 1, 1, 1))
# Put in the UI
box_tool.pack_start(sw, True, True, 0)
# INITIALIZATION
# Populate combo box
self.populate_objects_combo("comboboxtext_bottomlayer")
# Point entry
self.point_entry = Gtk.Entry()
box = self.builder.get_object("box_pointbox")
for child in box.get_children():
box.remove(child)
box.pack_start(self.point_entry, False, False, 1)
# Show the "Tool" tab
# self.notebook.set_current_page(3)
self.ui.notebook.set_current_page(3)
sw.show_all()
def on_toggle_units(self, widget):
"""
Callback for the Units radio-button change in the Options tab.
Changes the application's default units or the current project's units.
If changing the project's units, the change propagates to all of
the objects in the project.
:param widget: Ignored.
:return: None
"""
if self.toggle_units_ignore:
return
# Options to scale
dimensions = ['gerber_isotooldia', 'gerber_cutoutmargin', 'gerber_cutoutgapsize',
'gerber_noncoppermargin', 'gerber_bboxmargin', 'excellon_drillz',
'excellon_travelz', 'excellon_feedrate', 'cncjob_tooldia',
'geometry_cutz', 'geometry_travelz', 'geometry_feedrate',
'geometry_cnctooldia', 'geometry_painttooldia', 'geometry_paintoverlap',
'geometry_paintmargin']
def scale_options(sfactor):
for dim in dimensions:
self.options[dim] *= sfactor
# The scaling factor depending on choice of units.
factor = 1/25.4
if self.options_form.units_radio.get_value().upper() == 'MM':
factor = 25.4
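# 25.4 mm per inch: scale up when switching to MM, down (1/25.4) when switching to IN.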
# Changing project units. Warn user.
label = Gtk.Label("Changing the units of the project causes all geometrical \n" +
"properties of all objects to be scaled accordingly. Continue?")
dialog = Gtk.Dialog("Changing Project Units", self.window, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.set_default_size(150, 100)
dialog.set_modal(True)
box = dialog.get_content_area()
box.set_border_width(10)
box.add(label)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == Gtk.ResponseType.OK:
self.options_read_form()
scale_options(factor)
self.options_write_form()
for obj in self.collection.get_list():
units = self.options_form.units_radio.get_value().upper()
obj.convert_units(units)
current = self.collection.get_active()
if current is not None:
current.to_form()
self.plot_all()
else:
# Undo toggling
self.toggle_units_ignore = True
if self.options_form.units_radio.get_value().upper() == 'MM':
self.options_form.units_radio.set_value('IN')
else:
self.options_form.units_radio.set_value('MM')
self.toggle_units_ignore = False
self.options_read_form()
self.info("Converted units to %s" % self.options["units"])
self.units_label.set_text("[" + self.options["units"] + "]")
def on_file_openproject(self, param):
"""
Callback for menu item File->Open Project. Opens a file chooser and calls
``self.open_project()`` after successful selection of a filename.
:param param: Ignored.
:return: None
"""
def on_success(app_obj, filename):
app_obj.open_project(filename)
# Runs on_success on worker
self.file_chooser_action(on_success)
def on_file_saveproject(self, param):
"""
Callback for menu item File->Save Project. Saves the project to
``self.project_filename`` or calls ``self.on_file_saveprojectas()``
if set to None. The project is saved by calling ``self.save_project()``.
:param param: Ignored.
:return: None
"""
if self.project_filename is None:
self.on_file_saveprojectas(None)
else:
self.save_project(self.project_filename)
self.register_recent("project", self.project_filename)
self.info("Project saved to: " + self.project_filename)
def on_file_saveprojectas(self, param):
"""
Callback for menu item File->Save Project As... Opens a file
chooser and saves the project to the given file via
``self.save_project()``.
:param param: Ignored.
:return: None
"""
def on_success(app_obj, filename):
assert isinstance(app_obj, App)
try:
f = open(filename, 'r')
f.close()
exists = True
except IOError:
exists = False
msg = "File exists. Overwrite?"
if exists and self.question_dialog("File exists", msg) == Gtk.ResponseType.CANCEL:
return
app_obj.save_project(filename)
self.project_filename = filename
self.register_recent("project", filename)
app_obj.info("Project saved to: " + filename)
self.file_chooser_save_action(on_success)
def on_file_saveprojectcopy(self, param):
"""
Callback for menu item File->Save Project Copy... Opens a file
chooser and saves the project to the given file via
``self.save_project``. It does not update ``self.project_filename`` so
subsequent save requests are done on the previously known filename.
:param param: Ignore.
:return: None
"""
def on_success(app_obj, filename):
assert isinstance(app_obj, App)
try:
f = open(filename, 'r')
f.close()
exists = True
except IOError:
exists = False
msg = "File exists. Overwrite?"
if exists and self.question_dialog("File exists", msg) == Gtk.ResponseType.CANCEL:
return
app_obj.save_project(filename)
self.register_recent("project", filename)
app_obj.info("Project copy saved to: " + filename)
self.file_chooser_save_action(on_success)
def on_options_app2project(self, param):
"""
Callback for Options->Transfer Options->App=>Project. Copies options
from application defaults to project defaults.
:param param: Ignored.
:return: None
"""
self.defaults_read_form()
self.options.update(self.defaults)
self.options_write_form()
def on_options_project2app(self, param):
"""
Callback for Options->Transfer Options->Project=>App. Copies options
from project defaults to application defaults.
:param param: Ignored.
:return: None
"""
self.options_read_form()
self.defaults.update(self.options)
self.defaults_write_form()
def on_options_project2object(self, param):
"""
Callback for Options->Transfer Options->Project=>Object. Copies options
from project defaults to the currently selected object.
:param param: Ignored.
:return: None
"""
self.options_read_form()
obj = self.collection.get_active()
if obj is None:
self.info("WARNING: No object selected.")
return
for option in self.options:
if option.find(obj.kind + "_") == 0:
oname = option[len(obj.kind)+1:]
obj.options[oname] = self.options[option]
obj.to_form() # Update UI
def on_options_object2project(self, param):
"""
Callback for Options->Transfer Options->Object=>Project. Copies options
from the currently selected object to project defaults.
:param param: Ignored.
:return: None
"""
obj = self.collection.get_active()
if obj is None:
self.info("WARNING: No object selected.")
return
obj.read_form()
for option in obj.options:
if option in ['name']: # TODO: Handle this better...
continue
self.options[obj.kind + "_" + option] = obj.options[option]
self.options_write_form()
def on_options_object2app(self, param):
"""
Callback for Options->Transfer Options->Object=>App. Copies options
from the currently selected object to application defaults.
:param param: Ignored.
:return: None
"""
obj = self.collection.get_active()
if obj is None:
self.info("WARNING: No object selected.")
return
obj.read_form()
for option in obj.options:
if option in ['name']: # TODO: Handle this better...
continue
self.defaults[obj.kind + "_" + option] = obj.options[option]
self.defaults_write_form()
def on_options_app2object(self, param):
"""
Callback for Options->Transfer Options->App=>Object. Copies options
from application defaults to the currently selected object.
:param param: Ignored.
:return: None
"""
self.defaults_read_form()
obj = self.collection.get_active()
if obj is None:
self.info("WARNING: No object selected.")
return
for option in self.defaults:
if option.find(obj.kind + "_") == 0:
oname = option[len(obj.kind)+1:]
obj.options[oname] = self.defaults[option]
obj.to_form() # Update UI
def on_file_savedefaults(self, param):
"""
Callback for menu item File->Save Defaults. Saves application default options
``self.defaults`` to defaults.json.
:param param: Ignored.
:return: None
"""
# Read options from file
try:
f = open("defaults.json")
options = f.read()
f.close()
except:
App.log.error("Could not load defaults file.")
self.info("ERROR: Could not load defaults file.")
return
try:
defaults = json.loads(options)
except:
e = sys.exc_info()[0]
App.log.error("Failed to parse defaults file.")
App.log.error(str(e))
self.info("ERROR: Failed to parse defaults file.")
return
# Update options
self.defaults_read_form()
defaults.update(self.defaults)
# Save updated options
try:
f = open("defaults.json", "w")
json.dump(defaults, f)
f.close()
except:
self.info("ERROR: Failed to write defaults to file.")
return
self.info("Defaults saved.")
def on_options_combo_change(self, widget):
"""
Called when the combo box to choose between application defaults and
project option changes value. The corresponding variables are
copied to the UI.
:param widget: The widget from which this was called. Ignore.
:return: None
"""
combo_sel = self.ui.notebook.combo_options.get_active()
App.log.debug("Options --> %s" % combo_sel)
# Remove anything else in the box
# box_children = self.options_box.get_children()
box_children = self.ui.notebook.options_contents.get_children()
for child in box_children:
self.ui.notebook.options_contents.remove(child)
form = [self.options_form, self.defaults_form][combo_sel]
self.ui.notebook.options_contents.pack_start(form, False, False, 1)
form.show_all()
# self.options2form()
def on_canvas_configure(self, widget, event):
"""
Called whenever the canvas changes size. The axes are updated so
as to use the whole canvas.
:param widget: Ignored.
:param event: Ignored.
:return: None
"""
self.plotcanvas.auto_adjust_axes()
def on_row_activated(self, widget, path, col):
"""
Callback for selection activation (Enter or double-click) on the Project list.
Switches the notebook page to the object properties form. Calls
``self.notebook.set_current_page(1)``.
:param widget: Ignored.
:param path: Ignored.
:param col: Ignored.
:return: None
"""
# self.notebook.set_current_page(1)
self.ui.notebook.set_current_page(1)
def on_update_plot(self, widget):
"""
Callback for button on form for all kinds of objects.
Re-plots the current object only.
:param widget: The widget from which this was called. Ignored.
:return: None
"""
obj = self.collection.get_active()
obj.read_form()
self.set_progress_bar(0.5, "Plotting...")
def thread_func(app_obj):
assert isinstance(app_obj, App)
obj.plot()
GLib.timeout_add(300, lambda: app_obj.set_progress_bar(0.0, "Idle"))
# Send to worker
self.worker.add_task(thread_func, [self])
def on_excellon_tool_choose(self, widget):
"""
Callback for button on Excellon form to open up a window for
selecting tools.
:param widget: The widget from which this was called.
:return: None
"""
excellon = self.collection.get_active()
assert isinstance(excellon, FlatCAMExcellon)
excellon.show_tool_chooser()
def on_entry_eval_activate(self, widget):
"""
Called when an entry is activated (eg. by hitting enter) if
set to do so. Its text is eval()'d and set to the returned value.
The current object is updated.
:param widget:
:return:
"""
self.on_eval_update(widget)
obj = self.collection.get_active()
assert isinstance(obj, FlatCAMObj)
obj.read_form()
def on_eval_update(self, widget):
"""
Modifies the content of a Gtk.Entry by running
eval() on its contents and putting it back as a
string.
:param widget: The widget from which this was called.
:return: None
"""
# TODO: error handling here
widget.set_text(str(eval(widget.get_text())))
# def on_cncjob_exportgcode(self, widget):
# """
# Called from button on CNCjob form to save the G-Code from the object.
#
# :param widget: The widget from which this was called.
# :return: None
# """
# def on_success(app_obj, filename):
# cncjob = app_obj.collection.get_active()
# f = open(filename, 'w')
# f.write(cncjob.gcode)
# f.close()
# app_obj.info("Saved to: " + filename)
#
# self.file_chooser_save_action(on_success)
def on_delete(self, widget):
"""
Delete the currently selected FlatCAMObj.
:param widget: The widget from which this was called. Ignored.
:return: None
"""
# Keep this for later
name = copy(self.collection.get_active().options["name"])
# Remove plot
self.plotcanvas.figure.delaxes(self.collection.get_active().axes)
self.plotcanvas.auto_adjust_axes()
# Clear form
self.setup_component_editor()
# Remove from dictionary
self.collection.delete_active()
self.info("Object deleted: %s" % name)
def on_toolbar_replot(self, widget):
"""
Callback for toolbar button. Re-plots all objects.
:param widget: The widget from which this was called.
:return: None
"""
try:
self.collection.get_active().read_form()
except AttributeError:
pass
self.plot_all()
def on_clear_plots(self, widget):
"""
Callback for toolbar button. Clears all plots.
:param widget: The widget from which this was called.
:return: None
"""
self.plotcanvas.clear()
def on_file_new(self, *param):
"""
Callback for menu item File->New. Returns the application to its
startup state. This method is thread-safe.
:param param: Whatever is passed by the event. Ignore.
:return: None
"""
# Remove everything from memory
App.log.debug("on_file_new()")
# GUI things
def task():
# Clear plot
App.log.debug(" self.plotcanvas.clear()")
self.plotcanvas.clear()
# Delete data
App.log.debug(" self.collection.delete_all()")
self.collection.delete_all()
# Clear object editor
App.log.debug(" self.setup_component_editor()")
self.setup_component_editor()
GLib.idle_add(task)
# Clear project filename
self.project_filename = None
# Re-fresh project options
self.on_options_app2project(None)
def on_filequit(self, param):
"""
Callback for menu item File->Quit. Closes the application.
:param param: Whatever is passed by the event. Ignore.
:return: None
"""
self.window.destroy()
Gtk.main_quit()
def on_closewindow(self, param):
"""
Callback for closing the main window.
:param param: Whatever is passed by the event. Ignore.
:return: None
"""
self.window.destroy()
Gtk.main_quit()
def file_chooser_action(self, on_success):
"""
Opens the file chooser and runs on_success on a separate thread
upon completion of valid file choice.
:param on_success: A function to run upon completion of a valid file
selection. Takes 2 parameters: The app instance and the filename.
Note that it is run on a separate thread, therefore it must take the
appropriate precautions when accessing shared resources.
:type on_success: func
:return: None
"""
dialog = Gtk.FileChooserDialog("Please choose a file", self.ui,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
response = dialog.run()
# Works here
# t = Gtk.TextView()
# print t
if response == Gtk.ResponseType.OK:
filename = dialog.get_filename()
dialog.destroy()
# Send to worker.
self.worker.add_task(on_success, [self, filename])
elif response == Gtk.ResponseType.CANCEL:
self.info("Open cancelled.")
dialog.destroy()
# Works here
# t = Gtk.TextView()
# print t
def file_chooser_save_action(self, on_success):
"""
Opens the file chooser and runs on_success upon completion of valid file choice.
:param on_success: A function to run upon selection of a filename. Takes 2
parameters: The instance of the application (App) and the chosen filename. This
gets run immediately in the same thread.
:return: None
"""
dialog = Gtk.FileChooserDialog("Save file", self.window,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
dialog.set_current_name("Untitled")
response = dialog.run()
if response == Gtk.ResponseType.OK:
filename = dialog.get_filename()
dialog.destroy()
on_success(self, filename)
elif response == Gtk.ResponseType.CANCEL:
self.info("Save cancelled.") # print("Cancel clicked")
dialog.destroy()
def on_fileopengerber(self, param):
"""
Callback for menu item File->Open Gerber. Defines a function that is then passed
to ``self.file_chooser_action()``. It requests the creation of a FlatCAMGerber object
and updates the progress bar throughout the process.
:param param: Ignore
:return: None
"""
# This works here.
# t = Gtk.TextView()
# print t
self.file_chooser_action(lambda ao, filename: self.open_gerber(filename))
def on_fileopenexcellon(self, param):
"""
Callback for menu item File->Open Excellon. Defines a function that is then passed
to ``self.file_chooser_action()``. It requests the creation of a FlatCAMExcellon object
and updates the progress bar throughout the process.
:param param: Ignore
:return: None
"""
self.file_chooser_action(lambda ao, filename: self.open_excellon(filename))
def on_fileopengcode(self, param):
"""
Callback for menu item File->Open G-Code. Defines a function that is then passed
to ``self.file_chooser_action()``. It requests the creation of a FlatCAMCNCjob object
and updates the progress bar throughout the process.
:param param: Ignore
:return: None
"""
self.file_chooser_action(lambda ao, filename: self.open_gcode(filename))
def on_mouse_move_over_plot(self, event):
"""
Callback for the mouse motion event over the plot. This event is generated
by the Matplotlib backend and has been registered in ``self.__init__()``.
For details, see: http://matplotlib.org/users/event_handling.html
:param event: Contains information about the event.
:return: None
"""
try: # May fail in case mouse not within axes
self.ui.position_label.set_label("X: %.4f Y: %.4f" % (
event.xdata, event.ydata))
self.mouse = [event.xdata, event.ydata]
# for subscriber in self.plot_mousemove_subscribers:
# self.plot_mousemove_subscribers[subscriber](event)
except:
self.ui.position_label.set_label("")
self.mouse = None
def on_click_over_plot(self, event):
"""
Callback for the mouse click event over the plot. This event is generated
by the Matplotlib backend and has been registered in ``self.__init__()``.
For details, see: http://matplotlib.org/users/event_handling.html
Default actions are:
* Copy coordinates to clipboard. Ex.: (65.5473, -13.2679)
:param event: Contains information about the event, like which button
was clicked, the pixel coordinates and the axes coordinates.
:return: None
"""
# So it can receive key presses
self.plotcanvas.canvas.grab_focus()
try:
App.log.debug('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (
event.button, event.x, event.y, event.xdata, event.ydata))
self.clipboard.set_text("(%.4f, %.4f)" % (event.xdata, event.ydata), -1)
except Exception, e:
App.log.debug("Outside plot?")
App.log.debug(str(e))
def on_zoom_in(self, event):
"""
Callback for zoom-in request. This can be either from the corresponding
toolbar button or the '3' key when the canvas is focused. Calls ``self.plotcanvas.zoom()``.
:param event: Ignored.
:return: None
"""
self.plotcanvas.zoom(1.5)
return
def on_zoom_out(self, event):
"""
Callback for zoom-out request. This can be either from the corresponding
toolbar button or the '2' key when the canvas is focused. Calls ``self.plotcanvas.zoom()``.
:param event: Ignored.
:return: None
"""
self.plotcanvas.zoom(1 / 1.5)
def on_zoom_fit(self, event):
"""
Callback for zoom-fit request. This can be either from the corresponding
toolbar button or the '1' key when the canvas is focused. Calls
``self.plotcanvas.adjust_axes()`` with axes limits from the geometry
bounds of all objects.
:param event: Ignored.
:return: None
"""
xmin, ymin, xmax, ymax = self.collection.get_bounds()
width = xmax - xmin
height = ymax - ymin
xmin -= 0.05 * width
xmax += 0.05 * width
ymin -= 0.05 * height
ymax += 0.05 * height
self.plotcanvas.adjust_axes(xmin, ymin, xmax, ymax)
def on_key_over_plot(self, event):
"""
Callback for the key pressed event when the canvas is focused. Keyboard
shortcuts are handled here. So far, these are the shortcuts:
========== ============================================
Key Action
========== ============================================
'1' Zoom-fit. Fits the axes limits to the data.
'2' Zoom-out.
'3' Zoom-in.
'm' Toggle on-off the measuring tool.
========== ============================================
:param event: Ignored.
:return: None
"""
if event.key == '1': # 1
self.on_zoom_fit(None)
return
if event.key == '2': # 2
self.plotcanvas.zoom(1 / 1.5, self.mouse)
return
if event.key == '3': # 3
self.plotcanvas.zoom(1.5, self.mouse)
return
if event.key == 'm':
if self.measure.toggle_active():
self.info("Measuring tool ON")
else:
self.info("Measuring tool OFF")
return
class BaseDraw:
def __init__(self, plotcanvas, name=None):
"""
:param plotcanvas: The PlotCanvas where the drawing tool will operate.
:type plotcanvas: PlotCanvas
"""
self.plotcanvas = plotcanvas
# Must have unique axes
charset = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890"
self.name = name or "".join([random.choice(charset) for i in range(20)])
self.axes = self.plotcanvas.new_axes(self.name)
class DrawingObject(BaseDraw):
def __init__(self, plotcanvas, name=None):
"""
Possible objects are:
* Point
* Line
* Rectangle
* Circle
* Polygon
"""
BaseDraw.__init__(self, plotcanvas)
self.properties = {}
def plot(self):
return
def update_plot(self):
self.axes.cla()
self.plot()
self.plotcanvas.auto_adjust_axes()
class DrawingPoint(DrawingObject):
def __init__(self, plotcanvas, name=None, coord=None):
DrawingObject.__init__(self, plotcanvas)
self.properties.update({
"coordinate": coord
})
def plot(self):
x, y = self.properties["coordinate"]
self.axes.plot(x, y, 'o')
class Measurement:
def __init__(self, container, plotcanvas, update=None):
self.update = update
self.container = container
self.frame = None
self.label = None
self.point1 = None
self.point2 = None
self.active = False
self.plotcanvas = plotcanvas
self.click_subscription = None
self.move_subscription = None
def toggle_active(self, *args):
if self.active: # Deactivate
self.active = False
self.container.remove(self.frame)
if self.update is not None:
self.update()
self.plotcanvas.mpl_disconnect(self.click_subscription)
self.plotcanvas.mpl_disconnect(self.move_subscription)
return False
else: # Activate
App.log.debug("DEBUG: Activating Measurement Tool...")
self.active = True
self.click_subscription = self.plotcanvas.mpl_connect("button_press_event", self.on_click)
self.move_subscription = self.plotcanvas.mpl_connect('motion_notify_event', self.on_move)
self.frame = Gtk.Frame()
self.frame.set_margin_right(5)
self.frame.set_margin_top(3)
align = Gtk.Alignment()
align.set(0, 0.5, 0, 0)
align.set_padding(4, 4, 4, 4)
self.label = Gtk.Label()
self.label.set_label("Click on a reference point...")
abox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
abox.pack_start(Gtk.Image.new_from_file('share/measure16.png'), False, False, 0)
abox.pack_start(self.label, False, False, 0)
align.add(abox)
self.frame.add(align)
self.container.pack_end(self.frame, False, True, 1)
self.frame.show_all()
return True
def on_move(self, event):
if self.point1 is None:
self.label.set_label("Click on a reference point...")
else:
try:
dx = event.xdata - self.point1[0]
dy = event.ydata - self.point1[1]
d = sqrt(dx**2 + dy**2)
self.label.set_label("D = %.4f D(x) = %.4f D(y) = %.4f" % (d, dx, dy))
except TypeError:
pass
if self.update is not None:
self.update()
def on_click(self, event):
if self.point1 is None:
self.point1 = (event.xdata, event.ydata)
else:
self.point2 = copy(self.point1)
self.point1 = (event.xdata, event.ydata)
self.on_move(event)
| mit |
jmschrei/scikit-learn | sklearn/datasets/tests/test_base.py | 33 | 6143 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
trachelr/mne-python | examples/decoding/plot_decoding_csp_eeg.py | 9 | 5605 | """
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP.
Here the classifier is applied to features extracted on CSP filtered signals.
See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
The EEGBCI dataset is documented in [2]
The data set is available at PhysioNet [3]
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
[2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
(BCI) System. IEEE TBME 51(6):1034-1043
[3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource for
Complex Physiologic Signals. Circulation 101(23):e215-e220
"""
# Authors: Martin Billinger <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, pick_types
from mne.io import concatenate_raws
from mne.io.edf import read_raw_edf
from mne.datasets import eegbci
from mne.event import find_events
from mne.decoding import CSP
from mne.channels import read_layout
print(__doc__)
# #############################################################################
# # Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, preload=True) for f in raw_fnames]
raw = concatenate_raws(raw_files)
# strip channel names
raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]
# Apply band-pass filter
raw.filter(7., 30., method='iir')
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True, add_eeg_ref=False)
epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
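# Event codes are 2 (hands) and 3 (feet); subtracting 2 gives binary class labels 0/1.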
labels = epochs.events[:, -1] - 2
###############################################################################
# Classification with linear discriminant analysis
from sklearn.lda import LDA # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
# Assemble a classifier
svc = LDA()
csp = CSP(n_components=4, reg=None, log=True)
# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
# Use scikit-learn Pipeline with cross_val_score function
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score # noqa
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
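# Chance level is the proportion of the majority class.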
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
evoked = epochs.average()
evoked.data = csp.patterns_.T
evoked.times = np.arange(evoked.data.shape[0])
layout = read_layout('EEG1005')
evoked.plot_topomap(times=[0, 1, 2, 61, 62, 63], ch_type='eeg', layout=layout,
scale_time=1, time_format='%i', scale=1,
unit='Patterns (AU)', size=1.5)
###############################################################################
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
svc.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(svc.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/tests/test_hmm.py | 31 | 28118 | from __future__ import print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
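        # Row-normalise so each state's outgoing transition probabilities sum to 1.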
        transmat /= np.tile(transmat.sum(axis=1)[:, np.newaxis],
                            (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
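    # Run EM one iteration at a time, recording the total log-likelihood of obs after each pass.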
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
        # use init_function to initialize parameters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
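    # Helper: build a GMM with random means, weights and covariances of the requested type.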
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
| bsd-3-clause |
wolverton-research-group/qmpy | qmpy/web/views/data/references.py | 1 | 2137 | import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from io import BytesIO
import urllib
import base64
from django.shortcuts import render_to_response
from django.template import RequestContext
from ..tools import get_globals
from qmpy.models import Author, Journal, Reference, Entry
from qmpy.utils import *
def reference_view(request, reference_id):
ref = Reference.objects.get(id=reference_id)
data = get_globals()
data["reference"] = ref
return render_to_response(
"data/reference/paper.html", data, RequestContext(request)
)
def journal_view(request, journal_id):
journal = Journal.objects.get(id=journal_id)
dates = journal.references.values_list("year", flat=True)
plt.hist(dates)
plt.xlabel("Year")
plt.ylabel("# of publications with new materials")
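    # Render the histogram to an in-memory PNG and embed it in the page as a base64 data URI.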
img = BytesIO()
plt.savefig(img, dpi=75, bbox_inches="tight", format="png")
img.seek(0)
data_uri = base64.b64encode(img.read())
data_uri = "data:image/png;base64," + urllib.parse.quote(data_uri)
plt.close()
some_entries = Entry.objects.filter(reference__journal=journal)[:20]
data = get_globals()
data.update({"journal": journal, "hist": data_uri, "entries": some_entries})
return render_to_response(
"data/reference/journal.html", data, RequestContext(request)
)
def author_view(request, author_id):
author = Author.objects.get(id=author_id)
materials = Entry.objects.filter(reference__author_set=author)
coauths = {}
for co in Author.objects.filter(references__author_set=author):
papers = Reference.objects.filter(author_set=author)
papers = papers.filter(author_set=co)
mats = Entry.objects.filter(reference__in=papers)
data = {
"papers": papers.distinct().count(),
"materials": mats.distinct().count(),
}
coauths[co] = data
data = get_globals()
data.update({"author": author, "materials": materials, "coauthors": coauths})
return render_to_response(
"data/reference/author.html", data, RequestContext(request)
)
| mit |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/user_interfaces/wxcursor_demo.py | 9 | 2167 | """
Example to draw a cursor and report the data coords in wx
"""
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from numpy import arange, sin, pi
import wx
class CanvasFrame(wx.Frame):
def __init__(self, ):
wx.Frame.__init__(self,None,-1,
'CanvasFrame',size=(550,350))
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
self.axes.plot(t,s)
self.axes.set_xlabel('t')
self.axes.set_ylabel('sin(t)')
self.figure_canvas = FigureCanvas(self, -1, self.figure)
# Note that event is a MplEvent
self.figure_canvas.mpl_connect('motion_notify_event', self.UpdateStatusBar)
self.figure_canvas.Bind(wx.EVT_ENTER_WINDOW, self.ChangeCursor)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.figure_canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.statusBar = wx.StatusBar(self, -1)
self.statusBar.SetFieldsCount(1)
self.SetStatusBar(self.statusBar)
self.toolbar = NavigationToolbar2Wx(self.figure_canvas)
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.toolbar.Show()
def ChangeCursor(self, event):
self.figure_canvas.SetCursor(wx.StockCursor(wx.CURSOR_BULLSEYE))
def UpdateStatusBar(self, event):
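        # event.inaxes is None whenever the pointer is outside the plot axes.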
if event.inaxes:
x, y = event.xdata, event.ydata
self.statusBar.SetStatusText(( "x= " + str(x) +
" y=" +str(y) ),
0)
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
self.SetTopWindow(frame)
frame.Show(True)
return True
if __name__=='__main__':
app = App(0)
app.MainLoop()
| mit |
ros2/demos | topic_monitor/topic_monitor/scripts/topic_monitor.py | 1 | 17686 | # Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import re
from threading import Lock, Thread
import time
import rclpy
import rclpy.logging
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
from std_msgs.msg import Float32, Header
QOS_DEPTH = 10
logger = rclpy.logging.get_logger('topic_monitor')
class MonitoredTopic:
"""Monitor for the statistics and status of a single topic."""
def __init__(self, topic_id, stale_time, lock):
self.expected_value = None
self.expected_value_timer = None
self.initial_value = None
self.lock = lock
self.received_values = []
self.reception_rate_over_time = []
self.stale_time = stale_time
self.status = 'Offline'
self.status_changed = False
self.time_of_last_data = None
self.topic_id = topic_id
def increment_expected_value(self):
with self.lock:
if self.expected_value is not None:
self.expected_value += 1
def allowed_latency_timer_callback(self):
self.allowed_latency_timer.cancel()
self.expected_value_timer.reset()
def get_data_from_msg(self, msg):
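        # Parse the sequence number from frame_id; anything after the first underscore is ignored.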
data = msg.frame_id
idx = data.find('_')
data = data[:idx] if idx != -1 else data
return int(data) if data else 0
def topic_data_callback(self, msg, logger_=logger):
received_value = self.get_data_from_msg(msg)
logger_.info('%s: %s' % (self.topic_id, str(received_value)))
status = 'Alive'
with self.lock:
if self.expected_value is None:
# This is the first value from the topic
self.expected_value = received_value
self.initial_value = received_value
self.allowed_latency_timer.reset()
if received_value == -1:
# The topic was previously offline
status = 'Offline'
self.expected_value_timer.cancel()
self.expected_value = None
else:
self.expected_value_timer.cancel()
self.received_values.append(received_value)
self.expected_value = received_value + 1
self.allowed_latency_timer.reset()
self.time_of_last_data = time.time() # TODO(dhood): time stamp of msg
status_changed = status != self.status
self.status_changed |= status_changed # don't clear the flag before check_status
self.status = status
def check_status(self, current_time=time.time()):
# A status could have changed if a topic goes offline or comes back online
status_changed = self.status_changed
# Additionally we check if it has gone stale:
if self.status != 'Offline':
elapsed_time = current_time - self.time_of_last_data
if elapsed_time > self.stale_time:
status_changed |= self.status != 'Stale'
self.status = 'Stale'
self.status_changed = False
return status_changed
def current_reception_rate(self, window_size):
rate = None
if self.status != 'Offline':
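            # Reception rate = fraction of the last window_size expected sequence numbers actually received.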
expected_values = range(
max(self.initial_value, self.expected_value - window_size),
self.expected_value)
# How many of the expected values have been received?
count = len(set(expected_values) & set(self.received_values))
rate = count / len(expected_values)
return rate
class TopicMonitor:
"""Monitor of a set of topics that match a specified topic name pattern."""
def __init__(self, window_size):
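        # The pattern matches data topic names such as '/example_data' or '/example_data_best_effort';
        # the optional suffix selects the QoS reliability.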
self.data_topic_pattern = re.compile(r'(/(?P<data_name>\w*)_data_?(?P<reliability>\w*))')
self.monitored_topics = {}
self.monitored_topics_lock = Lock()
self.publishers = {}
self.reception_rate_topic_name = 'reception_rate'
self.status_changed = False
self.window_size = window_size
def add_monitored_topic(
self, topic_type, topic_name, node, qos_profile,
expected_period=1.0, allowed_latency=1.0, stale_time=1.0):
# Create a subscription to the topic
monitored_topic = MonitoredTopic(topic_name, stale_time, lock=self.monitored_topics_lock)
node_logger = node.get_logger()
node_logger.info('Subscribing to topic: %s' % topic_name)
sub = node.create_subscription(
topic_type,
topic_name,
functools.partial(monitored_topic.topic_data_callback, logger_=node_logger),
qos_profile)
assert sub # prevent unused warning
# Create a timer for maintaining the expected value received on the topic
expected_value_timer = node.create_timer(
expected_period, monitored_topic.increment_expected_value)
expected_value_timer.cancel()
# Create a one-shot timer that won't start the expected value timer until the allowed
# latency has elapsed
allowed_latency_timer = node.create_timer(
allowed_latency, monitored_topic.allowed_latency_timer_callback)
allowed_latency_timer.cancel()
# Create a publisher for the reception rate of the topic
reception_rate_topic_name = self.reception_rate_topic_name + topic_name
# TODO(dhood): remove this workaround
# once https://github.com/ros2/rmw_connext/issues/234 is resolved
reception_rate_topic_name += '_'
node.get_logger().info(
'Publishing reception rate on topic: %s' % reception_rate_topic_name)
reception_rate_publisher = node.create_publisher(
Float32, reception_rate_topic_name, 10)
with self.monitored_topics_lock:
monitored_topic.expected_value_timer = expected_value_timer
monitored_topic.allowed_latency_timer = allowed_latency_timer
self.publishers[topic_name] = reception_rate_publisher
self.monitored_topics[topic_name] = monitored_topic
def is_supported_type(self, type_name):
return type_name == 'std_msgs/msg/Header'
def get_topic_info(self, topic_name):
"""Infer topic info (e.g. QoS reliability) from the topic name."""
match = re.search(self.data_topic_pattern, topic_name)
if match and match.groups():
if match.groups()[0] != topic_name:
# Only part of the topic name matches
return None
topic_info = {'reliability': 'reliable'}
topic_info['topic_name'] = topic_name
topic_info['data_name'] = match.group('data_name')
reliability = match.group('reliability')
if reliability == 'best_effort':
topic_info['reliability'] = 'best_effort'
return topic_info
def update_topic_statuses(self):
any_status_changed = False
current_time = time.time()
with self.monitored_topics_lock:
for topic_id, monitored_topic in self.monitored_topics.items():
status_changed = monitored_topic.check_status(current_time)
any_status_changed |= status_changed
return any_status_changed
def output_status(self):
logger.info('---------------')
with self.monitored_topics_lock:
for topic_id, monitored_topic in self.monitored_topics.items():
logger.info('%s: %s' % (topic_id, monitored_topic.status))
logger.info('---------------')
def check_status(self):
status_changed = self.update_topic_statuses()
if status_changed:
self.output_status()
return status_changed
def calculate_statistics(self):
with self.monitored_topics_lock:
for topic_id, monitored_topic in self.monitored_topics.items():
rate = monitored_topic.current_reception_rate(self.window_size)
monitored_topic.reception_rate_over_time.append(rate)
rateMsg = Float32()
rateMsg.data = rate if rate is not None else 0.0
self.publishers[topic_id].publish(rateMsg)
def get_window_size(self):
return self.window_size
class TopicMonitorDisplay:
"""Display of the monitored topic reception rates."""
def __init__(self, topic_monitor, update_period):
self.colors = 'bgrcmykw'
self.markers = 'o>sp*hDx+'
self.monitored_topics = []
self.reception_rate_plots = {}
self.start_time = time.time()
self.topic_count = 0
self.topic_monitor = topic_monitor
self.x_data = []
self.x_range = 120 # points
self.x_range_s = self.x_range * update_period # seconds
self.make_plot()
def make_plot(self):
self.fig = plt.figure()
plt.title('Reception rate over time')
plt.xlabel('Time (s)')
plt.ylabel('Reception rate (last %i msgs)' % self.topic_monitor.get_window_size())
self.ax = self.fig.get_axes()[0]
self.ax.axis([0, self.x_range_s, 0, 1.1])
# Shrink axis' height to make room for legend
shrink_amnt = 0.2
box = self.ax.get_position()
self.ax.set_position(
[box.x0, box.y0 + box.height * shrink_amnt, box.width, box.height * (1 - shrink_amnt)])
def add_monitored_topic(self, topic_name):
# Make first instance of the line so that we only have to update it later
line, = self.ax.plot(
[], [], '-', color=self.colors[self.topic_count % len(self.colors)],
marker=self.markers[self.topic_count % len(self.markers)], label=topic_name)
self.reception_rate_plots[topic_name] = line
# Update the plot legend to include the new line
self.ax.legend(
loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox=True, shadow=True, ncol=2)
self.topic_count += 1
self.monitored_topics.append(topic_name)
def update_display(self):
now = time.time()
now_relative = now - self.start_time
self.x_data.append(now_relative)
with self.topic_monitor.monitored_topics_lock:
for topic_name, monitored_topic in self.topic_monitor.monitored_topics.items():
if topic_name not in self.monitored_topics:
self.add_monitored_topic(topic_name)
y_data = monitored_topic.reception_rate_over_time
line = self.reception_rate_plots[topic_name]
line.set_ydata(y_data)
line.set_xdata(self.x_data[-len(y_data):])
# Make the line slightly transparent if the topic is stale
line.set_alpha(0.5 if monitored_topic.status == 'Stale' else 1.0)
self.ax.axis(
[now_relative - self.x_range_s, now_relative, 0, 1.1])
self.fig.canvas.draw()
plt.pause(0.0001)
plt.show(block=False)
class DataReceivingThread(Thread):
def __init__(self, topic_monitor, options):
super(DataReceivingThread, self).__init__()
rclpy.init()
self.topic_monitor = topic_monitor
self.options = options
def run(self):
self.node = rclpy.create_node('topic_monitor')
try:
run_topic_listening(self.node, self.topic_monitor, self.options)
except KeyboardInterrupt:
self.stop()
raise
def stop(self):
self.node.destroy_node()
rclpy.shutdown()
def run_topic_listening(node, topic_monitor, options):
    """Subscribe to relevant topics and manage the data received from subscriptions."""
already_ignored_topics = set()
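    # Remember topics we have already warned about so the warning is not repeated on every poll.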
while rclpy.ok():
# Check if there is a new topic online
# TODO(dhood): use graph events rather than polling
topic_names_and_types = node.get_topic_names_and_types()
for topic_name, type_names in topic_names_and_types:
# Infer the appropriate QoS profile from the topic name
topic_info = topic_monitor.get_topic_info(topic_name)
if topic_info is None:
# The topic is not for being monitored
continue
if len(type_names) != 1:
if topic_name not in already_ignored_topics:
node.get_logger().info(
"Warning: ignoring topic '%s', which has more than one type: [%s]"
% (topic_name, ', '.join(type_names)))
already_ignored_topics.add(topic_name)
continue
type_name = type_names[0]
if not topic_monitor.is_supported_type(type_name):
if topic_name not in already_ignored_topics:
                    node.get_logger().info(
                        "Warning: ignoring topic '%s' because its message type (%s) "
'is not supported.'
% (topic_name, type_name))
already_ignored_topics.add(topic_name)
continue
is_new_topic = topic_name and topic_name not in topic_monitor.monitored_topics
if is_new_topic:
# Register new topic with the monitor
qos_profile = QoSProfile(depth=10)
qos_profile.depth = QOS_DEPTH
if topic_info['reliability'] == 'best_effort':
qos_profile.reliability = \
QoSReliabilityPolicy.BEST_EFFORT
topic_monitor.add_monitored_topic(
Header, topic_name, node, qos_profile,
options.expected_period, options.allowed_latency, options.stale_time)
# Wait for messages with a timeout, otherwise this thread will block any other threads
# until a message is received
rclpy.spin_once(node, timeout_sec=0.05)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-d', '--display', dest='show_display', action='store_true', default=False,
help='Display the reception rate of topics (requires matplotlib)')
parser.add_argument(
'-t', '--expected-period', type=float, nargs='?', default=0.5,
help='Expected time in seconds between received messages on a topic')
parser.add_argument(
'-s', '--stale-time', type=float, nargs='?', default=1.0,
help='Time in seconds without receiving messages before a topic is considered stale')
parser.add_argument(
'-l', '--allowed-latency', type=float, nargs='?', default=1.0,
help='Allowed latency in seconds between receiving expected messages')
parser.add_argument(
'-c', '--stats-calc-period', type=float, nargs='?', default=1.0,
help='Time in seconds between calculating topic statistics')
parser.add_argument(
'-n', '--window-size', type=int, nargs='?', default=20,
help='Number of messages in calculation of topic statistics')
args = parser.parse_args()
if args.show_display:
try:
global plt
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError('The --display option requires matplotlib to be installed')
topic_monitor = TopicMonitor(args.window_size)
try:
# Run two infinite loops simultaneously: one for receiving data (subscribing to topics and
# handling callbacks), and another for processing the received data (calculating the
# reception rate and publishing/displaying it).
# Since the display needs to happen in the main thread, we run the "data processing" loop
# in the main thread and run the "data receiving" loop in a secondary thread.
# Start the "data receiving" loop in a new thread
data_receiving_thread = DataReceivingThread(topic_monitor, args)
data_receiving_thread.start()
# Start the "data processing" loop in the main thread
# Process the data that has been received from topic subscriptions
if args.show_display:
topic_monitor_display = TopicMonitorDisplay(topic_monitor, args.stats_calc_period)
last_time = time.time()
while data_receiving_thread.is_alive():
now = time.time()
if now - last_time > args.stats_calc_period:
last_time = now
topic_monitor.check_status()
topic_monitor.calculate_statistics()
if args.show_display:
topic_monitor_display.update_display()
# sleep the main thread so background threads can do work
time_to_sleep = args.stats_calc_period - (time.time() - now)
if time_to_sleep > 0:
time.sleep(time_to_sleep)
finally:
if data_receiving_thread.is_alive():
data_receiving_thread.stop()
# Block this thread until the other thread terminates
data_receiving_thread.join()
if __name__ == '__main__':
main()
| apache-2.0 |
IshankGulati/scikit-learn | sklearn/mixture/gmm.py | 19 | 32365 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
@deprecated("The function sample_gaussian is deprecated in 0.18"
" and will be removed in 0.20."
" Use numpy.random.multivariate_normal instead.")
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array
Randomly generated sample. The shape depends on `n_samples`:
(n_features,) if `1`
(n_features, n_samples) otherwise
"""
    return _sample_gaussian(mean, covar, covariance_type=covariance_type,
                            n_samples=n_samples, random_state=random_state)
def _sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
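        # Tied/full covariance: scale by the matrix square root via the eigendecomposition of covar.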
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best results is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
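        # Weighted per-component log densities; logsumexp over components gives each sample's log-likelihood.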
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
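        Examples
        --------
        Illustrative sketch only; not verified output.
        >>> g = GMM(n_components=2).fit(20 * [[0]] + 20 * [[10]])  # doctest: +SKIP
        >>> g.sample(5).shape  # 5 draws from the fitted 1-D mixture  # doctest: +SKIP
        (5, 1)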
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = _sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic : float (the lower the better)
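        Notes
        -----
        Computed as ``-2 * total log-likelihood + _n_parameters() * log(n_samples)``;
        lower values indicate a better fit/complexity trade-off.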
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic : float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated in 0.18 and will be "
" removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
"""
Legacy Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.GaussianMixture` instead.
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The functon distribute_covar_matrix_to_match_covariance_type"
"is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
datapythonista/pandas | pandas/tests/arrays/categorical/test_dtypes.py | 4 | 7358 | import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import (
Categorical,
CategoricalIndex,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
class TestCategoricalDtypes:
def test_is_dtype_equal_deprecated(self):
# GH#37545
c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False)
with tm.assert_produces_warning(FutureWarning):
c1.is_dtype_equal(c1)
def test_categories_match_up_to_permutation(self):
# test dtype comparisons between cats
c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False)
c2 = Categorical(list("aabca"), categories=list("cab"), ordered=False)
c3 = Categorical(list("aabca"), categories=list("cab"), ordered=True)
assert c1._categories_match_up_to_permutation(c1)
assert c2._categories_match_up_to_permutation(c2)
assert c3._categories_match_up_to_permutation(c3)
assert c1._categories_match_up_to_permutation(c2)
assert not c1._categories_match_up_to_permutation(c3)
assert not c1._categories_match_up_to_permutation(Index(list("aabca")))
assert not c1._categories_match_up_to_permutation(c1.astype(object))
assert c1._categories_match_up_to_permutation(CategoricalIndex(c1))
assert c1._categories_match_up_to_permutation(
CategoricalIndex(c1, categories=list("cab"))
)
assert not c1._categories_match_up_to_permutation(
CategoricalIndex(c1, ordered=True)
)
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1._categories_match_up_to_permutation(s1)
assert c2._categories_match_up_to_permutation(s2)
assert c3._categories_match_up_to_permutation(s3)
assert c1._categories_match_up_to_permutation(s2)
assert not c1._categories_match_up_to_permutation(s3)
assert not c1._categories_match_up_to_permutation(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(["a", "b", "c"]))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(list("abcd")))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list("abcd")))
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_dtype_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(["a", "b", "c"], ["d", "e"])
result = c._set_dtype(CategoricalDtype(["a", "b"]))
expected = Categorical([None, None, None], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = Categorical([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
result = Categorical([f"foo{i:05d}" for i in range(40000)])
assert result.codes.dtype == "int32"
# adding cats
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = result.add_categories([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
# removing cats
result = result.remove_categories([f"foo{i:05d}" for i in range(300)])
assert result.codes.dtype == "int8"
@pytest.mark.parametrize("ordered", [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list("abbaaccc"), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype="int")
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("cat_ordered", [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH 10696/18593
data = list("abcaacbab")
cat = Categorical(data, categories=list("bac"), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype("category")
expected = cat
tm.assert_categorical_equal(result, expected)
def test_iter_python_types(self):
# GH-19909
cat = Categorical([1, 2])
assert isinstance(list(cat)[0], int)
assert isinstance(cat.tolist()[0], int)
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp("2017-01-01"), Timestamp("2017-01-02")])
assert isinstance(list(cat)[0], Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
| bsd-3-clause |
vkuznet/rep | tests/test_folding.py | 1 | 3126 | from __future__ import division, print_function, absolute_import
import numpy
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingRegressor
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, roc_auc_score
from rep.estimators import SklearnClassifier, SklearnRegressor
from rep.metaml import FoldingRegressor, FoldingClassifier
from rep.test.test_estimators import generate_classification_data, check_classification_model
__author__ = 'antares'
def check_folding(classifier, check_instance=True, has_staged_pp=True, has_importances=True):
X, y, sample_weight = generate_classification_data(distance=0.6)
assert classifier == classifier.fit(X, y, sample_weight=sample_weight)
assert list(classifier.features) == list(X.columns)
check_classification_model(classifier, X, y, check_instance=check_instance, has_staged_pp=has_staged_pp,
has_importances=has_importances)
def mean_vote(x):
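        # vote function: average the per-fold predictions element-wise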
return numpy.mean(x, axis=0)
labels = classifier.predict(X, mean_vote)
proba = classifier.predict_proba(X, mean_vote)
assert numpy.all(proba == classifier.predict_proba(X, mean_vote))
score = accuracy_score(y, labels)
print(score)
assert score > 0.7
assert numpy.allclose(proba.sum(axis=1), 1), 'probabilities do not sum to 1'
assert numpy.all(proba >= 0.), 'negative probabilities'
auc_score = roc_auc_score(y, proba[:, 1])
print(auc_score)
assert auc_score > 0.8
if has_staged_pp:
for p in classifier.staged_predict_proba(X, mean_vote):
assert p.shape == (len(X), 2)
# checking that last iteration coincides with previous
assert numpy.all(p == proba)
def test_folding_regressor(n_samples=100, n_features=3):
"""
    Check prediction quality and verify that training data is not used when predicting.
"""
from sklearn.metrics import mean_squared_error
X = numpy.random.normal(size=[n_samples, n_features])
y = numpy.random.normal(size=n_samples)
kfolder = FoldingRegressor(SklearnRegressor(GradientBoostingRegressor()), n_folds=2)
kfolder.fit(X, y)
preds = kfolder.predict(X)
# checking that we fitted fine
assert mean_squared_error(y, preds) > mean_squared_error(y * 0., preds) * 0.5
# shuffled preds
p = numpy.random.permutation(n_samples)
preds2 = kfolder.predict(X[p])[numpy.argsort(p)]
# Now let's compare this with shuffled kFolding:
assert mean_squared_error(y, preds) > mean_squared_error(y, preds2) * 0.5
preds_mean = kfolder.predict(X, vote_function=lambda x: numpy.mean(x, axis=0))
# Now let's compare this with mean prediction:
assert mean_squared_error(y, preds) > mean_squared_error(y, preds_mean)
def test_folding_classifier():
base_ada = SklearnClassifier(AdaBoostClassifier())
folding_str = FoldingClassifier(base_ada, n_folds=2)
check_folding(folding_str, True, True, True)
base_svm = SklearnClassifier(SVC())
folding_str = FoldingClassifier(base_svm, n_folds=4)
check_folding(folding_str, True, False, False) | apache-2.0 |
liberatorqjw/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 21 | 13756 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
"""Test csc_row_median actually calculates the median."""
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
nextgenusfs/ufits | amptk/filter.py | 2 | 31579 | #!/usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import argparse
import math
from Bio import SeqIO
from natsort import natsorted
import pandas as pd
import numpy as np
import amptk.amptklib as lib
class colr(object):
GRN = '\033[92m'
END = '\033[0m'
WARN = '\033[93m'
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self,prog):
super(MyFormatter,self).__init__(prog,max_help_position=50)
def main(args):
parser=argparse.ArgumentParser(prog='amptk-filter.py',
description='''Script inspects output of amptk-OTU_cluster.py and
determines useful threshold for OTU output based on a spike-in
mock community.''',
epilog="""Written by Jon Palmer (2015) [email protected]""",
formatter_class=MyFormatter)
parser.add_argument('-i','--otu_table', required=True, help='Input OTU table')
parser.add_argument('-f','--fasta', required=True, help='Input OTUs (multi-fasta)')
    parser.add_argument('-b','--mock_barcode', help='Barcode of Mock community')
parser.add_argument('-p','--index_bleed', help='Index Bleed filter. Default: auto')
parser.add_argument('-t','--threshold', default='max', choices=['sum','max','top25','top10','top5'],help='Threshold to use when calculating index-bleed')
    parser.add_argument('-c','--calculate', default='all', choices=['all', 'in'], help='Calculate index-bleed: use "all" for a synthetic mock, otherwise "in"')
parser.add_argument('-s','--subtract', default=0, help='Threshold to subtract')
parser.add_argument('-n','--normalize', default='y', choices=['y','n'], help='Normalize OTU table prior to filtering')
parser.add_argument('-m','--mc', help='Multi-FASTA mock community')
parser.add_argument('-d','--drop', nargs='+', help='samples to drop from table after index-bleed filtering')
parser.add_argument('--ignore', nargs='+', help='Ignore OTUs during index-bleed')
parser.add_argument('--delimiter', default='tsv', choices=['csv','tsv'], help='Delimiter')
parser.add_argument('--col_order', nargs='+', dest="col_order", help='Provide space separated list')
parser.add_argument('--keep_mock', action='store_true', help='Keep mock sample in OTU table (Default: False)')
parser.add_argument('--show_stats', action='store_true', help='Show stats datatable STDOUT')
parser.add_argument('--negatives', nargs='+', help='Negative Control Sample names')
parser.add_argument('-o','--out', help='Base output name')
parser.add_argument('--min_reads_otu', default=2, type=int, help='Minimum number of reads per OTU for experiment')
parser.add_argument('--min_samples_otu', default=1, type=int, help='Minimum number of samples per OTU for experiment')
parser.add_argument('-u','--usearch', dest="usearch", default='usearch9', help='USEARCH9 EXE')
    parser.add_argument('--debug', action='store_true', help='Keep intermediate files')
args=parser.parse_args(args)
parentdir = os.path.join(os.path.dirname(lib.__file__))
if not args.out:
#get base name of files
base = args.otu_table.split(".otu_table")[0]
else:
base = args.out
#remove logfile if exists
log_name = base + '.amptk-filter.log'
lib.removefile(log_name)
lib.setupLogging(log_name)
FNULL = open(os.devnull, 'w')
cmd_args = " ".join(sys.argv)+'\n'
lib.log.debug(cmd_args)
print("-------------------------------------------------------")
#initialize script, log system info and usearch version
lib.SystemInfo()
#Do a version check
usearch = args.usearch
lib.versionDependencyChecks(usearch, method='vsearch')
#check if otu_table is empty
lib.log.info("Loading OTU table: %s" % args.otu_table)
check = os.stat(args.otu_table).st_size
if check == 0:
lib.log.error("Input OTU table is empty")
sys.exit(1)
#get the OTU header info (depending on how OTU table was constructed, this might be different, so find it as you need for indexing)
with open(args.otu_table, 'r') as f:
first_line = f.readline()
OTUhead = first_line.split('\t')[0]
if args.delimiter == 'csv':
delim = str(',')
ending = '.csv'
elif args.delimiter == 'tsv':
delim = str('\t')
ending = '.txt'
#setup outputs
sorted_table = base+'.sorted'+ending
normal_table_pct = base+'.normalized.pct'+ending
normal_table_nums = base+'.normalized.num'+ending
subtract_table = base+'.normalized.subtract'+ending
filtered_table = base+'.normalized'+ending
final_table = base+'.final'+ending
final_binary_table = base+'.final.binary'+ending
stats_table = base+'.stats'+ending
#load OTU table into pandas DataFrame
df = pd.read_csv(args.otu_table, sep='\t')
df.set_index(OTUhead, inplace=True)
headers = df.columns.values.tolist()
if headers[-1] == 'taxonomy' or headers[-1] == 'Taxonomy':
otuDict = df[headers[-1]].to_dict()
del df[headers[-1]]
else:
otuDict = False
#parse OTU table to get count data for each OTU
AddCounts = {}
OTUcounts = df.sum(1)
for x in OTUcounts.index:
AddCounts[x] = int(OTUcounts[x])
#now add counts to fasta header
FastaCounts = base+'.otus.counts.fa'
OTU_tax = {}
with open(FastaCounts, 'w') as outfile:
with open(args.fasta, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if ';' in rec.id: #this should mean there is taxonomy, so split it
ID = rec.id.split(';',1)[0]
tax = rec.id.split(';',1)[-1]
OTU_tax[ID] = tax
if ID in AddCounts:
count = AddCounts.get(ID)
else:
count = 0
outfile.write('>%s;size=%i\n%s\n' % (ID, count, rec.seq))
else: #no tax, just process
if rec.id in AddCounts:
count = AddCounts.get(rec.id)
else:
count = 0
outfile.write('>%s;size=%i\n%s\n' % (rec.id, count, rec.seq))
lib.log.info('OTU table contains {:,} samples, {:,} OTUs, and {:,} reads counts'.format(len(df.columns.values.tolist()), len(df.index), int(df.values.sum())))
#setup output files/variables
mock_out = base + '.mockmap.txt'
if args.mock_barcode: #if user passes a column name for mock
#check if mock barcode is valid
validBCs = df.columns.values.tolist()
if not args.mock_barcode in validBCs:
lib.log.error("%s not a valid barcode." % args.mock_barcode)
lib.log.error("Valid barcodes: %s" % (' '.join(validBCs)))
sys.exit(1)
if args.col_order and not args.mock_barcode in args.col_order:
lib.log.error("Error: %s not listed in --col_order." % args.mock_barcode)
sys.exit(1)
#make sure there is a --mc passed here otherwise throw error
if not args.mc:
lib.log.error("If using the -b,--barcode option you must specify a fasta file of mock community via the --mc option")
sys.exit(1)
#get default mock community value
if args.mc == "mock3":
mockFile = os.path.join(parentdir, 'DB', 'amptk_mock3.fa')
elif args.mc == "mock2":
mockFile = os.path.join(parentdir, 'DB', 'amptk_mock2.fa')
elif args.mc == "mock1":
mockFile = os.path.join(parentdir, 'DB', 'amptk_mock1.fa')
elif args.mc == "synmock":
mockFile = os.path.join(parentdir, 'DB', 'amptk_synmock.fa')
else:
mockFile = os.path.abspath(args.mc)
#open mock community fasta and count records
mock_ref_count = lib.countfasta(mockFile)
#load OTU lengths into dictionary
SeqLength = lib.fastalen2dict(args.fasta)
        #map OTUs to mock community; keep all hits and select the best hit per mock member below
        lib.log.info("Mapping OTUs to Mock Community (VSEARCH)")
cmd = ['vsearch', '-usearch_global', mockFile, '--strand', 'plus',
'--id', '0.65','--db', FastaCounts, '--userout', mock_out,
'--userfields', 'query+target+id+ql+tl+alnlen+caln+mism+gaps',
'--maxaccepts', '0', '--maxrejects', '0']
lib.runSubprocess(cmd, lib.log)
#generate dictionary for name change
'''
If args.calculate is set to all, that means the script is trying to measure a synthetic
    mock of some kind. If that is the case, then chimeras are < 95% identical to mock members
    and variants are hits in between (95-97% identical) that are not the best hit.
'''
Results = {}
errorrate = {}
with open(mock_out, 'r') as map:
for line in map:
line = line.replace('\n', '')
cols = line.split('\t')
MockID = cols[0]
hit = cols[1].split(';size=')
otuID = hit[0]
abundance = int(hit[1])
pident = float(cols[2])
length = int(cols[4])
mism = int(cols[7])
gaps = int(cols[8])
diffs = gaps + mism
score = abundance * pident * length
if not otuID in errorrate:
errorrate[otuID] = [MockID,diffs]
else:
olderror = errorrate.get(otuID)
if diffs < olderror[1]:
errorrate[otuID] = [MockID,diffs]
if not MockID in Results:
Results[MockID] = [(otuID,abundance,pident,length,mism,diffs,score)]
else:
Results[MockID].append((otuID,abundance,pident,length,mism,diffs,score))
found_dict = {}
chimeras = []
variants = []
missing = []
for k,v in natsorted(list(Results.items())):
besthit = []
#v is a list of tuples of results, parse through to get best hit
for y in v:
if y[2] >= 97.0:
besthit.append(y)
elif y[2] >= 95.0 and y[2] < 97.0:
if not y[0] in variants:
variants.append(y[0])
else:
if not y[0] in chimeras:
chimeras.append(y[0])
if len(besthit) > 0:
besthit.sort(key=lambda x: x[1], reverse=True)
best = sorted(besthit[:3], key=lambda x: x[6], reverse=True)
found_dict[k] = best[0]
else:
missing.append(k)
#make name change dict
annotate_dict = {}
seen = []
for k,v in natsorted(list(found_dict.items())):
ID = v[0].replace('_chimera', '')
newID = k+'_pident='+str(v[2])+'_'+v[0]
annotate_dict[ID] = newID
if not v[0] in seen:
seen.append(v[0])
if args.calculate == 'all':
chimeras = [x for x in chimeras if x not in seen]
variants = [x for x in variants if x not in seen]
for i in chimeras:
annotate_dict[i] = i+'_suspect_mock_chimera'
for x in variants:
annotate_dict[x] = x+'_suspect_mock_variant'
if len(missing) > 0:
lib.log.info("%i mock missing: %s" % (len(missing), ', '.join(missing)))
else:
otu_new = args.fasta
#rename OTUs
if args.mock_barcode:
df.rename(index=annotate_dict, inplace=True)
#sort the table
df2 = df.reindex(index=natsorted(df.index))
if not args.col_order:
lib.log.info("Sorting OTU table naturally")
df = df2.reindex(columns=natsorted(df2.columns))
else:
lib.log.info("Sorting OTU table by user defined order (--col_order)")
col_headers = args.col_order
#check if all names in headers or not
for i in col_headers:
if not i in df2.columns.values:
col_headers.remove(i)
df = df2.reindex(columns=col_headers)
SortedTable = df
if otuDict:
df['Taxonomy'] = pd.Series(otuDict)
df.to_csv(sorted_table, sep=delim)
del df['Taxonomy']
else:
df.to_csv(sorted_table, sep=delim)
#get sums of columns
fs = df.sum(axis=0)
#fs.to_csv('reads.per.sample.csv')
otus_per_sample_original = df[df > 0].count(axis=0, numeric_only=True)
filtered = pd.DataFrame(df, columns=fs.index)
filt2 = filtered.loc[(filtered != 0).any(1)]
tos = filt2.sum(axis=1)
    fotus = tos[tos >= args.min_reads_otu] #valid OTU must be found at least --min_reads_otu times (default 2), i.e. no singletons
if len(fotus.index) < len(tos.index):
diff = len(tos.index) - len(fotus.index)
lib.log.info("Removing {:,} OTUs according to --min_reads_otu {:,}".format(diff, args.min_reads_otu))
filt3 = pd.DataFrame(filt2, index=fotus.index)
if args.normalize == 'y':
#normalize the OTU table
normal = filt3.truediv(fs)
if otuDict:
normal['Taxonomy'] = pd.Series(otuDict)
normal.to_csv(normal_table_pct, sep=delim)
del normal['Taxonomy']
else:
normal.to_csv(normal_table_pct, sep=delim)
#normalize back to read counts, pretend 100,000 reads in each
norm_round = np.round(normal.multiply(100000), decimals=0)
if otuDict:
norm_round['Taxonomy'] = pd.Series(otuDict)
norm_round.to_csv(normal_table_nums, sep=delim)
del norm_round['Taxonomy']
else:
norm_round.to_csv(normal_table_nums, sep=delim)
lib.log.info("Normalizing OTU table to number of reads per sample")
else:
norm_round = filt3
# list for mock members empty if one wasn't passed
mock = []
if args.mock_barcode:
#now calculate the index-bleed in both directions (into the mock and mock into the other samples)
sample = []
#get names from mapping
for k,v in list(annotate_dict.items()):
if not '_suspect_mock_' in v:
mock.append(v)
for i in norm_round.index:
if not i in mock:
sample.append(i)
if args.ignore:
mock = [x for x in mock if x not in args.ignore]
sample = [x for x in sample if x not in args.ignore]
#first calculate bleed out of mock community
#slice normalized dataframe to get only mock OTUs from table
mock_df = pd.DataFrame(norm_round, index=mock)
#if there are samples to drop, make sure they aren't being used in this calculation
if args.drop:
mock_df.drop(args.drop, axis=1, inplace=True)
#get total number of reads from mock OTUs from entire table
total = np.sum(np.sum(mock_df,axis=None))
#now drop the mock barcode sample
mock_df.drop(args.mock_barcode, axis=1, inplace=True)
#get number of reads that are result of bleed over
bleed1 = np.sum(np.sum(mock_df,axis=None))
#calculate rate of bleed by taking num reads bleed divided by the total
bleed1max = bleed1 / float(total)
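        #i.e. bleed1max = (mock OTU reads seen in non-mock samples) / (all reads assigned to mock OTUs)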
#second, calculate bleed into mock community
#get list of mock OTUs not found in any other sample -> these are likely chimeras
mock_only = pd.DataFrame(norm_round, index=list(norm_round.index), columns=[args.mock_barcode])
mock_OTUs_zeros = mock_only.loc[(mock_only==0).all(axis=1)]
theRest = [x for x in list(norm_round.columns.values) if x not in [args.mock_barcode]]
non_mocks = pd.DataFrame(norm_round, index=sample, columns=theRest)
non_mock_zeros = non_mocks.loc[(non_mocks==0).all(axis=1)]
zeros = [x for x in list(non_mock_zeros.index) if x not in list(mock_OTUs_zeros.index)]
if len(zeros) > 0:
lib.log.info("Found {:,} mock chimeras (only in mock sample and not mapped to mock sequences) excluding from index-bleed calculation".format(len(zeros)))
lib.log.debug('{:}'.format(', '.join(zeros)))
#now get updated list of samples, dropping chimeras
samples_trimmed = [x for x in sample if x not in zeros]
#slice the OTU table to get all OTUs that are not in mock community from the mock sample
sample_df = pd.DataFrame(norm_round, index=samples_trimmed, columns=[args.mock_barcode])
#get total number of reads that don't belong in mock
bleed2 = np.sum(np.sum(sample_df,axis=None))
#now pull the entire mock sample
mock_sample = pd.DataFrame(norm_round, columns=[args.mock_barcode])
        #calculate bleed into mock by taking the number of reads that don't belong divided by the total, i.e. the fraction of bad reads in the mock
bleed2max = bleed2 / float(np.sum(mock_sample.sum(axis=1)))
#autocalculate the subtraction filter by taking the maximum value that doesn't belong
subtract_num = max(sample_df.max())
#get max values for bleed
#can only use into samples measurement if not using synmock
if args.calculate == 'all':
if bleed1max > bleed2max:
bleedfilter = math.ceil(bleed1max*1000)/1000
else:
bleedfilter = math.ceil(bleed2max*1000)/1000
lib.log.info("Index bleed, mock into samples: %f%%. Index bleed, samples into mock: %f%%." % (bleed1max*100, bleed2max*100))
else:
bleedfilter = math.ceil(bleed2max*1000)/1000
lib.log.info("Index bleed, samples into mock: %f%%." % (bleed2max*100))
else:
        bleedfilter = args.index_bleed #this is the value needed to filter MiSeq; Ion is likely less, but shouldn't affect the data very much either way.
if args.index_bleed:
args.index_bleed = float(args.index_bleed)
lib.log.info("Overwriting auto detect index-bleed, setting to %f%%" % (args.index_bleed*100))
bleedfilter = args.index_bleed
else:
if bleedfilter:
lib.log.info("Will use value of %f%% for index-bleed OTU filtering." % (bleedfilter*100))
else:
bleedfilter = 0 #no filtering if you don't pass -p or -b
lib.log.info("No spike-in mock (-b) or index-bleed (-p) specified, thus not running index-bleed filtering")
if bleedfilter > 0.05:
lib.log.info("Index bleed into samples is abnormally high (%f%%), if you have biological mock you should use `--calculate in`" % (bleedfilter*100))
#to combat barcode switching, loop through each OTU filtering out if less than bleedfilter threshold
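    #worked example (illustrative numbers only): for a row of counts (10000, 12, 3) with
    #--threshold max, total=10000; at bleedfilter=0.005 the cutoff is sub=50, so the 12 and 3
    #are set to 0 while the 10000 count is kept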
cleaned = []
for row in norm_round.itertuples():
result = [row[0]]
if args.threshold == 'max':
total = max(row[1:]) #get max OTU count from table to calculate index bleed from.
elif args.threshold == 'sum':
total = sum(row[1:])
elif args.threshold == 'top25':
top = sorted(row[1:], key=int, reverse=True)
topn = int(round(len(row[1:])*0.25))
total = sum(top[:topn])
elif args.threshold == 'top10':
top = sorted(row[1:], key=int, reverse=True)
topn = int(round(len(row[1:])*0.10))
total = sum(top[:topn])
elif args.threshold == 'top5':
top = sorted(row[1:], key=int, reverse=True)
topn = int(round(len(row[1:])*0.05))
total = sum(top[:topn])
sub = total * bleedfilter
for i in row[1:]:
if i < sub:
i = 0
result.append(i)
cleaned.append(result)
header = [OTUhead]
for i in norm_round.columns:
header.append(i)
#create data frame of index bleed filtered results
final = pd.DataFrame(cleaned, columns=header)
final.set_index(OTUhead, inplace=True)
if args.drop: #if user has passed samples to drop, do it here, subtract drop list from Header
lib.log.info("Dropping %i samples from table: %s" % (len(args.drop), ', '.join(args.drop)))
colsdrop = []
for x in args.drop:
if x in header:
colsdrop.append(x)
#now drop those columns
final.drop(colsdrop, axis=1, inplace=True)
if args.subtract != 'auto':
subtract_num = int(args.subtract)
else:
try:
subtract_num = int(subtract_num)
lib.log.info("Auto subtract filter set to %i" % subtract_num)
except NameError:
subtract_num = 0
lib.log.info("Error: to use 'auto' subtract feature, provide a sample name to -b,--mock_barcode.")
if subtract_num != 0:
lib.log.info("Subtracting %i from OTU table" % subtract_num)
sub = final.subtract(subtract_num)
sub[sub < 0] = 0 #if negative, change to zero
sub = sub.loc[~(sub==0).all(axis=1)]
sub = sub.astype(int)
if otuDict:
sub['Taxonomy'] = pd.Series(otuDict)
sub.to_csv(subtract_table, sep=delim)
del sub['Taxonomy']
else:
sub.to_csv(subtract_table, sep=delim)
otus_if_sub = sub[sub > 0].count(axis=0, numeric_only=True)
final = sub.astype(int)
otus_per_sample = final[final > 0].count(axis=0, numeric_only=True)
stats = pd.concat([fs, otus_per_sample_original, otus_per_sample], axis=1)
stats.columns = ['reads per sample', 'original OTUs', 'final OTUs']
stats.fillna(0, inplace=True)
stats = stats.astype(int)
if args.show_stats:
print(stats.to_string())
stats.to_csv(stats_table, sep=delim)
#after all filtering, get list of OTUs in mock barcode
if args.mock_barcode:
mocks = final[args.mock_barcode]
mocks = mocks.loc[~(mocks==0)].astype(int)
totalmismatches = 0
totallength = 0
chimera_count = 0
variant_count = 0
for otu in mocks.index:
count = mocks[otu]
if 'suspect_mock' in otu:
if 'chimera' in otu:
chimera_count += 1
if 'variant' in otu:
variant_count += 1
otu = otu.split('_',1)[0]
else:
otu = otu.split('_',-1)[-1]
otu_length = SeqLength.get(otu)
countlen = otu_length * count
totallength += countlen
if otu in errorrate:
otu_diffs = errorrate.get(otu)[1]
totaldiffs = otu_diffs * count
totalmismatches += totaldiffs
else:
totalmismatches += countlen
e_rate = totalmismatches / float(totallength) * 100
lib.log.info(args.mock_barcode + ' sample has '+'{0:,}'.format(len(mocks))+' OTUS out of '+'{0:,}'.format(mock_ref_count)+ ' expected; '+'{0:,}'.format(variant_count)+ ' mock variants; '+ '{0:,}'.format(chimera_count)+ ' mock chimeras; Error rate: '+'{0:.3f}%'.format(e_rate))
if not args.keep_mock:
try:
final.drop(args.mock_barcode, axis=1, inplace=True)
except:
pass
#drop OTUs that are now zeros through whole table
final = final.loc[~(final==0).all(axis=1)]
final = final.astype(int)
#output filtered normalized table
if otuDict:
final['Taxonomy'] = pd.Series(otuDict)
final.to_csv(filtered_table, sep=delim)
del final['Taxonomy']
else:
final.to_csv(filtered_table, sep=delim)
#convert to binary
final[final > 0] = 1
#apply min_sample_otu here (most stringent filter, not sure I would use this unless you know what you are doing)
los = final.sum(axis=1)
fotus = los[los >= args.min_samples_otu]
keep = fotus.index
final2 = pd.DataFrame(final, index=keep)
diff = len(final.index) - len(keep)
if diff > 0:
lib.log.info('Dropped {:,} OTUs found in fewer than {:,} samples'.format(diff, args.min_samples_otu))
#drop samples that don't have any OTUs after filtering
final3 = final2.loc[:, (final2 != 0).any(axis=0)]
final3 = final3.astype(int)
#get the actual read counts from binary table
merge = {}
for index, row in final3.items():
merge[index] = []
for i in range(0, len(row)):
if row[i] == 0:
merge[index].append(row[i])
else:
merge[index].append(SortedTable[index][row.index[i]])
FiltTable = pd.DataFrame(merge, index=list(final3.index))
FiltTable.index.name = '#OTU ID'
#order the filtered table
#sort the table
FiltTable2 = FiltTable.reindex(index=natsorted(FiltTable.index))
if not args.col_order:
FiltTable = FiltTable2.reindex(columns=natsorted(FiltTable2.columns))
else:
col_headers = args.col_order
#check if all names in headers or not
for i in col_headers:
if not i in FiltTable2.columns.values:
col_headers.remove(i)
FiltTable = FiltTable2.reindex(columns=col_headers)
#check for negative samples and how many OTUs are in these samples
#if found, filter the OTUs and alert user to rebuild OTU table, I could do this automatically, but would then require
#there to be reads passed to this script which seems stupid. Just deleting the OTUs is probably not okay....
if args.negatives:
if len(args.negatives) > 1: #if greater than 1 then assuming list of sample names
Neg = args.negatives
else:
if os.path.isfile(args.negatives[0]): #check if it is a file or not
Neg = []
with open(args.negatives[0], 'r') as negfile:
for line in negfile:
line = line.replace('\n', '')
Neg.append(line)
else:
Neg = args.negatives
#Now slice the final OTU table, check if values are valid
        #partition Neg up front (removing from a list while iterating over it skips entries)
        NotFound = [i for i in Neg if i not in FiltTable.columns.values]
        Neg = [i for i in Neg if i in FiltTable.columns.values]
if len(NotFound) > 0:
lib.log.info('Samples not found: %s' % ' '.join(NotFound))
#slice table
NegTable = FiltTable.reindex(columns=Neg)
#drop those that are zeros through all samples, just pull out OTUs found in the negative samples
NegTable = NegTable.loc[~(NegTable==0).all(axis=1)]
NegOTUs = list(NegTable.index)
#now make sure you aren't dropping mock OTUs as you want to keep those for filtering new OTU table
NegOTUs = [item for item in NegOTUs if item not in mock]
else:
NegOTUs = []
#check if negative OTUs exist, if so, then output updated OTUs and instructions on creating new OTU table
if len(NegOTUs) > 0:
lib.log.info("%i OTUs are potentially contamination" % len(NegOTUs))
otu_clean = base + '.cleaned.otus.fa'
otu_contam = base + '.negatives.otus.fa'
with open(otu_clean, 'w') as otu_update:
with open(otu_contam, 'w') as otu_dirty:
with open(args.fasta, "r") as myfasta:
for rec in SeqIO.parse(myfasta, 'fasta'):
if not rec.id in NegOTUs:
SeqIO.write(rec, otu_update, 'fasta')
else:
SeqIO.write(rec, otu_dirty, 'fasta')
lib.log.info("Cleaned OTUs saved to: {}".format(otu_clean))
lib.log.info("OTUs found in negative samples: {}".format(otu_contam))
lib.log.info("Generate a new OTU table like so:\namptk remove -i %s --format fasta -l %s -o %s\nvsearch --usearch_global %s --db %s --strand plus --id 0.97 --otutabout newOTU.table.txt\n" % (base+'.demux.fq.gz', ' '.join(Neg), base+'.cleaned.fq.gz', base+'.cleaned.fa', otu_clean))
else: #proceed with rest of script
#output final table
if otuDict:
FiltTable['Taxonomy'] = pd.Series(otuDict)
FiltTable.to_csv(final_table, sep=delim)
del FiltTable['Taxonomy']
else:
FiltTable.to_csv(final_table, sep=delim)
finalSamples = FiltTable.columns.values.tolist()
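        #don't count the Taxonomy column as a sample when reporting totals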
if 'Taxonomy' in finalSamples:
numFinalSamples = len(finalSamples) - 1
else:
numFinalSamples = len(finalSamples)
lib.log.info('Filtered OTU table contains {:,} samples, {:,} OTUs, and {:,} read counts'.format(numFinalSamples, len(FiltTable.index), FiltTable.values.sum()))
if numFinalSamples < len(df.columns.values.tolist()):
diffSamples = [item for item in headers if item not in FiltTable.columns.values.tolist()]
lib.log.info('Samples dropped: %s' % (','.join(diffSamples)))
#output binary table
if otuDict:
final3['Taxonomy'] = pd.Series(otuDict)
final3.to_csv(final_binary_table, sep=delim)
else:
final3.to_csv(final_binary_table, sep=delim)
#generate final OTU list for taxonomy
lib.log.info("Finding valid OTUs")
otu_new = base + '.filtered.otus.fa'
with open(otu_new, 'w') as otu_update:
with open(args.fasta, "r") as myfasta:
for rec in SeqIO.parse(myfasta, 'fasta'):
if ';' in rec.id:
rec.id = rec.id.split(';',1)[0]
if args.mock_barcode:
#map new names of mock
if rec.id in annotate_dict:
newname = annotate_dict.get(rec.id)
rec.id = newname
rec.description = ''
if rec.id in final3.index:
if rec.id in OTU_tax:
otu_update.write('>%s;%s\n%s\n' % (rec.id, OTU_tax.get(rec.id), rec.seq))
else:
otu_update.write('>%s\n%s\n' % (rec.id, rec.seq))
#tell user what output files are
print("-------------------------------------------------------")
print("OTU Table filtering finished")
print("-------------------------------------------------------")
print("OTU Table Stats: %s" % stats_table)
print("Sorted OTU table: %s" % sorted_table)
if not args.debug:
for i in [normal_table_pct, normal_table_nums, subtract_table, mock_out, FastaCounts]:
lib.removefile(i)
else:
print("Normalized (pct): %s" % normal_table_pct)
print("Normalized (10k): %s" % normal_table_nums)
if args.subtract != 0:
print("Subtracted table: %s" % subtract_table)
print("Normalized/filter: %s" % filtered_table)
print("Final Binary table: %s" % final_binary_table)
print("Final OTU table: %s" % final_table)
print("Filtered OTUs: %s" % otu_new)
print("-------------------------------------------------------")
if 'darwin' in sys.platform:
print(colr.WARN + "\nExample of next cmd:" + colr.END + " amptk taxonomy -f %s -i %s -m mapping_file.txt -d ITS2\n" % (otu_new, final_table))
else:
print("\nExample of next cmd: amptk taxonomy -f %s -i %s -m mapping_file.txt -d ITS2\n" % (otu_new, final_table))
if __name__ == "__main__":
main(args)
| bsd-2-clause |
AZMAG/urbansim | urbansim/utils/networks.py | 3 | 1785 | from __future__ import print_function
import logging
import yaml
import numpy as np
import orca
import pandas as pd
from . import misc
from ..models import util
logger = logging.getLogger(__name__)
def from_yaml(net, cfgname):
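    """Compute node-level accessibility variables defined in a YAML config.
    `net` is expected to behave like a pandana.Network (it must expose
    `node_ids`, `set` and `aggregate`); `cfgname` is resolved through
    misc.config and must define `node_col` plus a list of
    `variable_definitions`. Returns a pandas.DataFrame indexed by node id
    with one column per variable definition. An illustrative (hypothetical)
    config entry looks like:
        node_col: node_id
        variable_definitions:
          - name: residential_units_1500m
            dataframe: buildings
            varname: residential_units
            radius: 1500
            aggregation: sum
            decay: linear
    """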
print("Computing accessibility variables")
    with open(misc.config(cfgname)) as f:
        cfg = yaml.safe_load(f)
nodes = pd.DataFrame(index=net.node_ids)
assert "node_col" in cfg, "Need to specify from where to take the node id"
node_col = cfg.get('node_col')
for variable in cfg['variable_definitions']:
name = variable["name"]
print("Computing %s" % name)
decay = variable.get("decay", "linear")
agg = variable.get("aggregation", "sum")
vname = variable.get("varname", None)
radius = variable["radius"]
dfname = variable["dataframe"]
flds = [vname] if vname else []
flds.append(node_col)
if "filters" in variable:
flds += util.columns_in_filters(variable["filters"])
logger.info(" Fields available to aggregate = " + ', '.join(flds))
df = orca.get_table(dfname).to_frame(flds)
if "filters" in variable:
df = util.apply_filter_query(df, variable["filters"])
logger.info(" Filters = %s" % variable["filters"])
logger.info(" dataframe = %s, varname=%s" % (dfname, vname))
logger.info(" radius = %s, aggregation = %s, decay = %s" % (
radius, agg, decay))
# set the variable
net.set(df[node_col], variable=df[vname] if vname else None)
# aggregate it
nodes[name] = net.aggregate(radius, type=agg, decay=decay)
if "apply" in variable:
nodes[name] = nodes[name].apply(eval(variable["apply"]))
return nodes
| bsd-3-clause |
kgullikson88/HET-Scripts | PlotFits.py | 1 | 2089 | import sys
from astropy.io import fits as pyfits
import matplotlib.pyplot as plt
import numpy as np
import HelperFunctions
import FittingUtilities
if __name__ == "__main__":
fileList = []
tellurics = False
normalize = False
byorder = False # Plots one order at a time
pixelscale = False
oneplot = False
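    # crude command-line parsing: recognized flags toggle behavior, anything else is treated as a filename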
for arg in sys.argv[1:]:
if "tellcorr" in arg:
tellurics = True
elif "-norm" in arg:
normalize = True
elif "-order" in arg:
byorder = True
elif "-pix" in arg:
pixelscale = True
# byorder = True
elif "-one" in arg:
oneplot = True
else:
fileList.append(arg)
linestyles = ['k-', 'r-', 'b-', 'g-']
for fnum, fname in enumerate(fileList):
ls = linestyles[fnum % len(linestyles)]
orders = HelperFunctions.ReadExtensionFits(fname)
        print(fname, len(orders))
if not oneplot:
plt.figure(fnum)
plt.title(fname)
if tellurics:
model = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="model")
for i, order in enumerate(orders):
# order.cont = FindContinuum.Continuum(order.x, order.y, lowreject=3, highreject=3)
if pixelscale:
order.x = np.arange(order.size())
if tellurics:
plt.plot(order.x, order.y / order.cont, 'k-')
plt.plot(order.x, model[i].y, 'r-')
else:
if normalize:
plt.plot(order.x, order.y / order.cont, 'k-')
plt.text(order.x.mean(), 1.1, str(i + 1))
else:
plt.plot(order.x, order.y, ls)
#plt.plot(order.x, order.cont)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Flux")
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
if byorder:
plt.title("Order %i" % i)
plt.show()
if not byorder:
plt.show()
| gpl-3.0 |
selective-inference/selective-inference | doc/learning_examples/parametric/lasso_selected.py | 3 | 7527 | import functools, hashlib
import numpy as np
import pandas as pd
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.learners import mixture_learner
from selection.learning.utils import naive_partial_model_inference, pivot_plot
from selection.learning.core import gbm_fit_sk, infer_general_target
#### A parametric model will need something like this
class gaussian_OLS_learner(mixture_learner):
def __init__(self,
algorithm,
observed_selection,
X_select,
observed_MLE,
observed_Y):
(self.algorithm,
self.observed_outcome,
self.X_select,
self.observed_MLE,
self.observed_Y) = (algorithm,
observed_selection,
X_select,
observed_MLE,
observed_Y)
self.observed_target = observed_MLE
_dispersion = observed_MLE[-1]
gram_matrix = X_select.T.dot(X_select)
self._chol = (np.linalg.cholesky(np.linalg.inv(gram_matrix)) *
np.sqrt(_dispersion))
n, p = X_select.shape
self._Xinv = np.linalg.pinv(X_select)
self._beta_cov = _dispersion * self._Xinv.dot(self._Xinv.T)
self._resid = observed_Y - X_select.dot(self._Xinv.dot(observed_Y))
def learning_proposal(self):
"""
Return perturbed data and perturbed MLE.
"""
n, s = self.X_select.shape
beta_hat = self.observed_MLE[:-1]
dispersion = self.observed_MLE[-1]
perturbed_beta = beta_hat.copy()
nidx = np.random.choice(np.arange(s), min(3, s), replace=False)
for idx in nidx:
scale = np.random.choice(self.scales, 1)
perturbed_beta[idx] += (scale * np.random.standard_normal() *
np.sqrt(self._beta_cov[idx, idx]))
resid = np.random.standard_normal(n) * np.sqrt(dispersion)
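        # project the simulated noise off the column space of X_select so the
        # OLS fit of the perturbed response equals perturbed_beta exactly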
resid -= self.X_select.dot(self._Xinv.dot(resid))
perturbed_Y = self.X_select.dot(perturbed_beta) + resid
perturbed_MLE = np.zeros(s+1)
perturbed_MLE[:s] = perturbed_beta
perturbed_MLE[-1] = np.sum(resid**2) / (n - s)
return perturbed_MLE, perturbed_Y
####
def simulate(n=500, p=30, s=5, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):
# description of statistical problem
X, y, truth, _, _, sigmaX = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)
def algorithm(lam, X, y):
n, p = X.shape
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=X.T.dot(X))
pen = rr.l1norm(p, lagrange=lam)
S = -X.T.dot(y)
loss.quadratic = rr.identity_quadratic(0, 0, S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=100, tol=1.e-10)
success += soln != 0
return set(np.nonzero(success)[0])
# run selection algorithm
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(algorithm, lam, X)
instance_hash = hashlib.md5()
instance_hash.update(X.tobytes())
instance_hash.update(y.tobytes())
instance_hash.update(truth.tobytes())
instance_id = instance_hash.hexdigest()
observed_tuple = selection_algorithm(y)
(pivots,
covered,
lengths,
pvalues,
lower,
upper) = [], [], [], [], [], []
targets = []
if len(observed_tuple) > 0:
s = len(observed_tuple)
X_select = X[:, list(observed_tuple)]
Xpi = np.linalg.pinv(X_select)
final_target = np.zeros(s+1)
final_target[:s] = Xpi.dot(X.dot(truth))
final_target[-1] = sigma**2
observed_target = Xpi.dot(y)
resid = y - X_select.dot(observed_target)
dispersion = np.linalg.norm(resid)**2 / (n-s)
target_cov = np.zeros((s+1, s+1))
target_cov[:s][:,:s] = Xpi.dot(Xpi.T) * dispersion
target_cov[s, s] = 2 * dispersion**2 / (n - s)
MLE = np.zeros(s+1)
MLE[:s] = observed_target
MLE[-1] = dispersion
learner = gaussian_OLS_learner(selection_algorithm,
observed_tuple,
X_select,
MLE,
y)
print(observed_tuple)
results = infer_general_target(observed_tuple,
MLE,
target_cov,
learner,
hypothesis=final_target,
fit_probability=gbm_fit_sk,
fit_args={'n_estimators':5000},
alpha=alpha,
B=B)
for result, true_target in zip(results, final_target):
(pivot,
interval,
pvalue,
_) = result
pvalues.append(pvalue)
pivots.append(pivot)
covered.append((interval[0] < true_target) * (interval[1] > true_target))
lengths.append(interval[1] - interval[0])
lower.append(interval[0])
upper.append(interval[1])
if len(observed_tuple) > 0:
df = pd.DataFrame({'pivot':pivots,
'pvalue':pvalues,
'coverage':covered,
'length':lengths,
'upper':upper,
'lower':lower,
'target':final_target,
'variable':list(observed_tuple) + [-1], # -1 for dispersion,
'id':[instance_id]*len(pivots),
})
naive = True # report naive intervals as well?
if naive:
naive_df = naive_partial_model_inference(X,
y,
dispersion,
truth,
observed_tuple,
alpha=alpha)
df = pd.merge(df, naive_df, on='variable')
return df
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(2000):
df = simulate(B=10000)
csvfile = 'lasso_selected.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
| bsd-3-clause |
alliemacleay/MachineLearning_CS6140 | Homeworks/HW7/__init__.py | 1 | 10385 | import inspect
import warnings
import collections
import cython
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KernelDensity
import numpy.linalg as la
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.metrics.pairwise import cosine_similarity
import os
import subprocess
subprocess.call(["cython", "-a", os.path.join(os.getcwd(), "CS6140_A_MacLeay/Homeworks/HW7/speedy.pyx")])
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()},
reload_support=True)
import speedy
__author__ = 'Allison MacLeay'
class KNN(object):
def __init__(self, n_neighbors=5, classifier=KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='minkowski', p=2)):
self.k = n_neighbors
self.classifier = classifier
def predict(self, X_test, X, y):
sciKNN = self.classifier
sciKNN.fit(X, y)
return sciKNN.predict(X_test)
class MyKNN(object):
    def __init__(self, n_neighbors=5, algorithm='brute', metric='minkowski',
                 metric_params=None, p=2, cls_metric=np.mean, radius=None,
                 density=False, outlier_label=None, bandwidth=None):
self.n_neighbors = n_neighbors
self.metric = metric
if (metric == 'minkowski' and p == 2) or metric == 'euclidean':
self.kernel = speedy.Kernel('euclidean')
else:
self.kernel = Kernel(ktype=metric)
self.N = None
self.cls_metric = cls_metric
self.X_train = None
self.y_train = None
self.radius = radius
self.density = density
self.outlier_label = outlier_label
self.outlier_index = None
self.bandwidth = bandwidth # for density
def fit(self, X, y):
if type(X) is not np.ndarray:
X = np.asarray(X)
y = np.asarray(y, dtype=np.float)
self.X_train = X
self.y_train = y
if self.outlier_label is not None:
self.outlier_index = self.y_train.shape[0]
self.y_train = np.append(self.y_train, self.outlier_label)
def predict(self, X_test):
dec = self.decision_function(X_test)
dsz = len(dec)
return [-1 if dec[i] <= 0 else 1 for i in range(dsz)]
def decision_function(self, X_test):
# Map to K
print 'my predict {} {}'.format(self.n_neighbors, self.kernel.name())
if type(X_test) is not np.ndarray:
X_test = np.asarray(X_test)
#K = speedy.calc_K(self.kernel, X_test, self.X_train)
print('start kernel')
K = calc_K(self.kernel, X_test, self.X_train)
print 'my Kernel calculated'
print K
print K.shape
y_pred = np.zeros(X_test.shape[0])
if self.radius is not None:
#radius
return speedy.decision_function_radius(K, np.array(X_test), self.y_train, self.n_neighbors, self.kernel.name(),
float(self.radius), float(self.outlier_label), int(self.outlier_index), self.cls_metric)
elif self.density:
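            # kernel-density classification: estimate p(x | class) from kernel sums
            # over each class's training points, then combine with the prior p1 via Bayes' rule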
px_given_1 = np.zeros(K.shape[0])
px_given_0 = np.zeros(K.shape[0])
print set(self.y_train)
p1 = float(np.sum(self.y_train > .5)) / self.y_train.shape[0]
print(collections.Counter(self.y_train))
print(p1)
#p0_arr = np.zeros(K.shape[0])
for i in range(K.shape[0]):
#print('predict {}'.format(i))
# k for each sample in test set i-test j-train
ones = K[i, self.y_train > .5]
zeros = K[i, self.y_train <= .5]
print ones
n_ones = len(ones)
n_zeros = len(zeros)
sum_ones = float(np.sum(ones))
sum_zeros = float(np.sum(zeros))
total = sum_ones + sum_zeros
if total == 0:
px_given_1[i] = 0
px_given_0[i] = 0
continue
px_given_1[i] = sum_ones / total
px_given_0[i] = sum_zeros / total
px1 = np.asarray([float(p1 * px_given_1[i]) for i in xrange(K.shape[0])])
print(px1)
px0 = np.asarray([float((1.0 - p1) * px_given_0[i]) for i in xrange(K.shape[0])])
zs = [a + b for a, b in zip(px0, px1)]
px1 /= zs
px0 /= zs
print(zip(px1, px0))
y_pred = [1 if px1[i] > px0[i] else 0 for i in range(K.shape[0])]
else:
self.N = np.array([sorted(zip(K[i, :], range(len(K[i, :]))))[:self.n_neighbors] for i in range(K.shape[0])])
if not self.density:
for i in xrange(self.N.shape[0]):
y_pred[i] = self.cls_metric([self.y_train[self.N[i][j][1]] for j in xrange(self.N[i].shape[0])])
return y_pred
# get_params needed for clone() in multiclass.py
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
# _get_param_names needed for clone() in multiclass.py
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args
def calc_K(kernel, X_test, X_train):
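    # build the full test-by-train kernel/distance matrix: K[i, j] compares test sample i to training sample j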
n_samples = X_test.shape[0]
n_samples_train = X_train.shape[0]
K = np.zeros(shape=(n_samples, n_samples_train))
for i in range(n_samples):
for j in range(n_samples_train):
K[i, j] = kernel.f(X_test, X_train, i, j)
return K
class Kernel(object):
def __init__(self, ktype='euclidean', sigma=1):
self.sigma = sigma # for Gaussian
self.ktype = ktype
self.f = None
if ktype == 'euclidean' or ktype == 'minkowski':
self.f = self.euclid
if ktype == 'cosine':
self.f = self.cosine
if ktype == 'cosine_sci':
self.f = self.cosine_sci
if ktype == 'cosine_similarity':
self.f = self.cosine_similarity
if ktype == 'gaussian':
self.f = self.gaussian
if ktype == 'poly2':
self.f = self.poly2
if ktype == 'gaussian_sci':
self.f = self.gaussian_sci
if ktype == 'gaussian_density':
self.f = self.gaussian_density
if ktype == 'poly2_sci':
self.f = self.poly2_sci
def euclid(self, xi, xj, **kwargs):
return np.sqrt(np.sum([(xi[m]-xj[m]) ** 2 for m in range(xi.shape[0])]))
#return [np.sqrt(np.sum((xi[m] - xj[m]) **2)) for m in range(xi.shape[0])]
def cosine(self, X, Xt, i, j):
# X and Xt are vectors
return 1-(np.dot(X[i], Xt[j].T) / (la.norm(X[i]) * la.norm(Xt[j]))) # equals cosine distance
#return cosine(X[i], Xt[j])
#return cosine_similarity(xi, xj)
def cosine_similarity(self, X, Xt, i, j):
return cosine_similarity(X[i], Xt[j])
def cosine_sci(self, xi, xj):
return 1-(np.dot(xi, xj.T) / (la.norm(xi) * la.norm(xj))) # equals cosine distance
def xxxgaussian(self, xi, xj, i=None, j=None, sigma=1, **kwargs):
return np.sum([np.exp(-(la.norm(x-y) ** 2 / (2 * sigma ** 2))) for x, y in zip (xi, xj)])
def gaussian(self, x, y, i=None, j=None, sigma=1, **kwargs):
return np.exp(-(la.norm(x[i]-y[j]) ** 2 / (2 * sigma ** 2)))
def gaussian_sci(self, xi, yj):
sigma = 1
return np.exp(-(la.norm(xi-yj) ** 2 / (2 * sigma ** 2)))
def gaussian_density(self, x, y, i, j):
deltaRow = x[i] - y[j]
return np.exp(np.dot(deltaRow, deltaRow.T) / -(2**2))
def poly2(self, x, y, i, j):
return - np.dot(x[i], y[j]) ** 2
#return np.sum[xi*yi+ xi**2 * yi**2 + 2*xi*yi for xi, yi in zip(x[i], y[i])]
def poly2_sci(self, xi, xj, **kwargs):
return - np.dot(xi, xj) ** 2
#return np.sum[xi*yi+ xi**2 * yi**2 + 2*xi*yi for xi, yi in zip(x[i], y[i])]
def name(self):
return self.ktype
def compute(self, xi, xj, **kwargs):
return self.f(xi, xj)
def testCython():
print 'out of speedy'
speedy.test()
| mit |
jkarnows/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely, if
alpha is selected too large, the Lasso is equivalent to stepwise regression,
and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
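    # multiplying i.i.d. Gaussian draws by this Cholesky factor imposes the block correlation defined above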
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
huongttlan/bokeh | examples/charts/file/boxplot.py | 37 | 1117 | from collections import OrderedDict
import pandas as pd
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.olympics2014 import data
# create a DataFrame with the sample data
df = pd.io.json.json_normalize(data['data'])
# filter by countries with at least one medal and sort
df = df[df['medals.total'] > 0]
df = df.sort("medals.total", ascending=False)
# get the countries and group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values
# build a dict containing the grouped data
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
# any of the following commented are valid BoxPlot inputs
#medals = pd.DataFrame(medals)
#medals = list(medals.values())
#medals = tuple(medals.values())
#medals = np.array(list(medals.values()))
output_file("boxplot.html")
boxplot = BoxPlot(
medals, marker='circle', outliers=True, title="boxplot test",
xlabel="medal type", ylabel="medal count", width=800, height=600)
show(boxplot) | bsd-3-clause |
ghchinoy/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 9 | 13792 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
  4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
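# Illustrative example (not executed): parse('white,255,255,255') yields
# rgb == [1., 1., 1.], chars of shape (5, 256) -- one one-hot row per ASCII
# character of "white" -- and length == 5.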
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 3. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.squared_difference(predictions, labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
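  # model_loss closes over a single batch; optimizer.minimize re-evaluates it
  # under the hood to compute gradients for that batch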
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
florian-f/sklearn | sklearn/tests/test_random_projection.py | 6 | 13183 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import (
johnson_lindenstrauss_min_dim,
gaussian_random_matrix,
sparse_random_matrix,
SparseRandomProjection,
GaussianRandomProjection)
from sklearn.utils.testing import (
assert_less,
assert_raises,
assert_raise_message,
assert_array_equal,
assert_equal,
assert_almost_equal,
assert_in,
assert_array_almost_equal)
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
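# shared test fixture: a 10 x 1000 data matrix with ~1% non-zero Gaussian
# entries, kept in both dense and CSR form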
n_samples, n_features = (10, 1000)
n_nonzeros = n_samples * n_features / 100.
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
RandomProjection(n_components=n_features + 1).fit(data)
assert_equal(len(w), 1)
assert issubclass(w[-1].category, UserWarning)
| bsd-3-clause |
thunderhoser/GewitterGefahr | gewittergefahr/plotting/model_eval_plotting.py | 1 | 30772 | """Plotting methods for model evaluation.
This module can be used to evaluate any kind of weather model (machine learning,
NWP, heuristics, human forecasting, etc.). This module is completely agnostic
of where the forecasts come from.
--- REFERENCES ---
Hsu, W., and A. Murphy, 1986: "The attributes diagram: A geometrical framework
for assessing the quality of probability forecasts". International Journal
of Forecasting, 2 (3), 285-293.
"""
import numpy
from descartes import PolygonPatch
from matplotlib import pyplot
import matplotlib.colors
from gewittergefahr.gg_utils import model_evaluation as model_eval
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.plotting import plotting_utils
# TODO(thunderhoser): Variable and method names are way too verbose.
ROC_CURVE_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255
ROC_CURVE_WIDTH = 3.
RANDOM_ROC_COLOUR = numpy.full(3, 152. / 255)
RANDOM_ROC_WIDTH = 2.
PERF_DIAGRAM_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255
PERF_DIAGRAM_WIDTH = 3.
FREQ_BIAS_COLOUR = numpy.full(3, 152. / 255)
FREQ_BIAS_WIDTH = 2.
FREQ_BIAS_STRING_FORMAT = '%.2f'
FREQ_BIAS_PADDING = 10
FREQ_BIAS_LEVELS = numpy.array([0.25, 0.5, 0.75, 1, 1.5, 2, 3, 5])
CSI_LEVELS = numpy.linspace(0, 1, num=11, dtype=float)
PEIRCE_SCORE_LEVELS = numpy.linspace(0, 1, num=11, dtype=float)
RELIABILITY_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255
RELIABILITY_WIDTH = 3.
PERFECT_RELIA_COLOUR = numpy.full(3, 152. / 255)
PERFECT_RELIA_WIDTH = 2.
ZERO_BSS_COLOUR = numpy.array([31, 120, 180], dtype=float) / 255
ZERO_BSS_LINE_WIDTH = 2.
CLIMO_COLOUR = numpy.full(3, 152. / 255)
CLIMO_LINE_WIDTH = 2.
BAR_FACE_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255
BAR_EDGE_COLOUR = numpy.full(3, 0.)
BAR_EDGE_WIDTH = 2.
HISTOGRAM_LEFT_EDGE = 0.2
HISTOGRAM_BOTTOM_EDGE = 0.575
HISTOGRAM_AXES_WIDTH = 0.25
HISTOGRAM_AXES_HEIGHT = 0.25
HISTOGRAM_X_VALUES = numpy.linspace(0., 1., num=6)
HISTOGRAM_Y_SPACING = 0.1
POLYGON_OPACITY = 0.5
POSITIVE_BSS_OPACITY = 0.2
FONT_SIZE = 30
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
def _get_csi_colour_scheme():
"""Returns colour scheme for CSI (critical success index).
:return: colour_map_object: Colour scheme (instance of
`matplotlib.colors.ListedColormap`).
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map.
"""
this_colour_map_object = pyplot.get_cmap('Blues')
this_colour_norm_object = matplotlib.colors.BoundaryNorm(
CSI_LEVELS, this_colour_map_object.N)
rgba_matrix = this_colour_map_object(this_colour_norm_object(CSI_LEVELS))
colour_list = [
rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0])
]
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1.))
colour_norm_object = matplotlib.colors.BoundaryNorm(
CSI_LEVELS, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_peirce_colour_scheme():
"""Returns colour scheme for Peirce score.
:return: colour_map_object: Colour scheme (instance of
`matplotlib.colors.ListedColormap`).
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map.
"""
this_colour_map_object = pyplot.get_cmap('Blues')
this_colour_norm_object = matplotlib.colors.BoundaryNorm(
PEIRCE_SCORE_LEVELS, this_colour_map_object.N)
rgba_matrix = this_colour_map_object(
this_colour_norm_object(PEIRCE_SCORE_LEVELS)
)
colour_list = [
rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0])
]
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1.))
colour_norm_object = matplotlib.colors.BoundaryNorm(
PEIRCE_SCORE_LEVELS, colour_map_object.N)
return colour_map_object, colour_norm_object
def _confidence_interval_to_polygon(
x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top,
for_performance_diagram=False):
"""Generates polygon for confidence interval.
P = number of points in bottom curve = number of points in top curve
    :param x_coords_bottom: length-P numpy array with x-coordinates of bottom
        curve (lower end of confidence interval).
    :param y_coords_bottom: Same but for y-coordinates.
    :param x_coords_top: length-P numpy array with x-coordinates of top curve
        (upper end of confidence interval).
    :param y_coords_top: Same but for y-coordinates.
:param for_performance_diagram: Boolean flag. If True, confidence interval
is for a performance diagram, which means that coordinates will be
sorted in a slightly different way.
:return: polygon_object: Instance of `shapely.geometry.Polygon`.
"""
nan_flags_top = numpy.logical_or(
numpy.isnan(x_coords_top), numpy.isnan(y_coords_top)
)
if numpy.all(nan_flags_top):
return None
nan_flags_bottom = numpy.logical_or(
numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom)
)
if numpy.all(nan_flags_bottom):
return None
real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0]
real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0]
if for_performance_diagram:
y_coords_top = y_coords_top[real_indices_top]
sort_indices_top = numpy.argsort(y_coords_top)
y_coords_top = y_coords_top[sort_indices_top]
x_coords_top = x_coords_top[real_indices_top][sort_indices_top]
y_coords_bottom = y_coords_bottom[real_indices_bottom]
sort_indices_bottom = numpy.argsort(-y_coords_bottom)
y_coords_bottom = y_coords_bottom[sort_indices_bottom]
x_coords_bottom = x_coords_bottom[real_indices_bottom][
sort_indices_bottom
]
else:
x_coords_top = x_coords_top[real_indices_top]
sort_indices_top = numpy.argsort(-x_coords_top)
x_coords_top = x_coords_top[sort_indices_top]
y_coords_top = y_coords_top[real_indices_top][sort_indices_top]
x_coords_bottom = x_coords_bottom[real_indices_bottom]
sort_indices_bottom = numpy.argsort(x_coords_bottom)
x_coords_bottom = x_coords_bottom[sort_indices_bottom]
y_coords_bottom = y_coords_bottom[real_indices_bottom][
sort_indices_bottom
]
polygon_x_coords = numpy.concatenate((
x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]])
))
polygon_y_coords = numpy.concatenate((
y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]])
))
return polygons.vertex_arrays_to_polygon_object(
polygon_x_coords, polygon_y_coords)
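# Illustrative usage sketch (assumption, not in the original file): building a
# confidence-interval polygon from synthetic bottom/top ROC-curve bounds.  All
# variable names below are placeholders.
def _demo_confidence_interval_to_polygon():
    """Creates a CI polygon for a synthetic ROC curve."""
    pofd_values = numpy.linspace(0., 1., num=11)
    pod_values_bottom = pofd_values ** 0.75   # lower bound of the interval
    pod_values_top = pofd_values ** 0.25      # upper bound of the interval

    polygon_object = _confidence_interval_to_polygon(
        x_coords_bottom=pofd_values, y_coords_bottom=pod_values_bottom,
        x_coords_top=pofd_values, y_coords_top=pod_values_top,
        for_performance_diagram=False)

    # The resulting shapely polygon can then be drawn with `PolygonPatch`, as
    # done in `plot_bootstrapped_roc_curve`.
    return polygon_object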
def _plot_background_of_attributes_diagram(axes_object, climatology):
"""Plots background (references lines and polygons) of attributes diagram.
For more on the attributes diagram, see Hsu and Murphy (1986).
BSS = Brier skill score. For more on the BSS, see
`model_evaluation.get_brier_skill_score`.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param climatology: Event frequency for the entire dataset.
"""
error_checking.assert_is_geq(climatology, 0.)
error_checking.assert_is_leq(climatology, 1.)
(x_coords_left_skill_area, y_coords_left_skill_area,
x_coords_right_skill_area, y_coords_right_skill_area
) = model_eval.get_skill_areas_in_reliability_curve(climatology)
skill_area_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(ZERO_BSS_COLOUR),
POSITIVE_BSS_OPACITY
)
left_polygon_object = polygons.vertex_arrays_to_polygon_object(
x_coords_left_skill_area, y_coords_left_skill_area
)
left_polygon_patch = PolygonPatch(
left_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour
)
axes_object.add_patch(left_polygon_patch)
right_polygon_object = polygons.vertex_arrays_to_polygon_object(
x_coords_right_skill_area, y_coords_right_skill_area
)
right_polygon_patch = PolygonPatch(
right_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour
)
axes_object.add_patch(right_polygon_patch)
no_skill_x_coords, no_skill_y_coords = (
model_eval.get_no_skill_reliability_curve(climatology)
)
axes_object.plot(
no_skill_x_coords, no_skill_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(ZERO_BSS_COLOUR),
linestyle='solid', linewidth=ZERO_BSS_LINE_WIDTH
)
climo_x_coords, climo_y_coords = (
model_eval.get_climatology_line_for_reliability_curve(climatology)
)
axes_object.plot(
climo_x_coords, climo_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(CLIMO_COLOUR),
linestyle='dashed', linewidth=CLIMO_LINE_WIDTH
)
no_resolution_x_coords, no_resolution_y_coords = (
model_eval.get_no_resolution_line_for_reliability_curve(climatology)
)
axes_object.plot(
no_resolution_x_coords, no_resolution_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(CLIMO_COLOUR),
linestyle='dashed', linewidth=CLIMO_LINE_WIDTH
)
def _plot_inset_histogram_for_attributes_diagram(
figure_object, num_examples_by_bin):
"""Plots forecast histogram inset in attributes diagram.
For more on the attributes diagram, see Hsu and Murphy (1986).
B = number of forecast bins
:param figure_object: Instance of `matplotlib.figure.Figure`.
:param num_examples_by_bin: length-B numpy array with number of examples in
each forecast bin.
"""
error_checking.assert_is_integer_numpy_array(num_examples_by_bin)
error_checking.assert_is_numpy_array(num_examples_by_bin, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)
num_forecast_bins = len(num_examples_by_bin)
error_checking.assert_is_geq(num_forecast_bins, 2)
example_frequency_by_bin = (
num_examples_by_bin.astype(float) / numpy.sum(num_examples_by_bin)
)
forecast_bin_edges = numpy.linspace(0., 1., num=num_forecast_bins + 1)
forecast_bin_width = forecast_bin_edges[1] - forecast_bin_edges[0]
forecast_bin_centers = forecast_bin_edges[:-1] + forecast_bin_width / 2
inset_axes_object = figure_object.add_axes([
HISTOGRAM_LEFT_EDGE, HISTOGRAM_BOTTOM_EDGE,
HISTOGRAM_AXES_WIDTH, HISTOGRAM_AXES_HEIGHT
])
inset_axes_object.bar(
forecast_bin_centers, example_frequency_by_bin, forecast_bin_width,
color=plotting_utils.colour_from_numpy_to_tuple(BAR_FACE_COLOUR),
edgecolor=plotting_utils.colour_from_numpy_to_tuple(BAR_EDGE_COLOUR),
linewidth=BAR_EDGE_WIDTH
)
max_y_tick_value = rounder.floor_to_nearest(
1.05 * numpy.max(example_frequency_by_bin), HISTOGRAM_Y_SPACING
)
num_y_ticks = 1 + int(numpy.round(
max_y_tick_value / HISTOGRAM_Y_SPACING
))
y_tick_values = numpy.linspace(0., max_y_tick_value, num=num_y_ticks)
pyplot.xticks(HISTOGRAM_X_VALUES, axes=inset_axes_object)
pyplot.yticks(y_tick_values, axes=inset_axes_object)
inset_axes_object.set_xlim(0., 1.)
inset_axes_object.set_ylim(0., 1.05 * numpy.max(example_frequency_by_bin))
inset_axes_object.set_title('Forecast histogram', fontsize=20)
def plot_roc_curve(axes_object, pod_by_threshold, pofd_by_threshold,
line_colour=ROC_CURVE_COLOUR, plot_background=True):
"""Plots ROC (receiver operating characteristic) curve.
T = number of binarization thresholds
For the definition of a "binarization threshold" and the role they play in
ROC curves, see `model_evaluation.get_points_in_roc_curve`.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
:param pofd_by_threshold: length-T numpy array of POFD (probability of false
detection) values.
:param line_colour: Line colour.
:param plot_background: Boolean flag. If True, will plot background
(reference line and Peirce-score contours).
:return: line_handle: Line handle for ROC curve.
"""
error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
pod_by_threshold, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
pod_by_threshold, 1., allow_nan=True)
num_thresholds = len(pod_by_threshold)
expected_dim = numpy.array([num_thresholds], dtype=int)
error_checking.assert_is_numpy_array(
pofd_by_threshold, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(
pofd_by_threshold, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
pofd_by_threshold, 1., allow_nan=True)
error_checking.assert_is_boolean(plot_background)
if plot_background:
pofd_matrix, pod_matrix = model_eval.get_pofd_pod_grid()
peirce_score_matrix = pod_matrix - pofd_matrix
this_colour_map_object, this_colour_norm_object = (
_get_peirce_colour_scheme()
)
pyplot.contourf(
            pofd_matrix, pod_matrix, peirce_score_matrix, PEIRCE_SCORE_LEVELS,
cmap=this_colour_map_object, norm=this_colour_norm_object, vmin=0.,
vmax=1., axes=axes_object)
colour_bar_object = plotting_utils.plot_colour_bar(
axes_object_or_matrix=axes_object, data_matrix=peirce_score_matrix,
colour_map_object=this_colour_map_object,
colour_norm_object=this_colour_norm_object,
orientation_string='vertical', extend_min=False, extend_max=False,
fraction_of_axis_length=0.8)
colour_bar_object.set_label('Peirce score (POD minus POFD)')
random_x_coords, random_y_coords = model_eval.get_random_roc_curve()
axes_object.plot(
random_x_coords, random_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(RANDOM_ROC_COLOUR),
linestyle='dashed', linewidth=RANDOM_ROC_WIDTH
)
nan_flags = numpy.logical_or(
numpy.isnan(pofd_by_threshold), numpy.isnan(pod_by_threshold)
)
if numpy.all(nan_flags):
line_handle = None
else:
real_indices = numpy.where(numpy.invert(nan_flags))[0]
line_handle = axes_object.plot(
pofd_by_threshold[real_indices], pod_by_threshold[real_indices],
color=plotting_utils.colour_from_numpy_to_tuple(line_colour),
linestyle='solid', linewidth=ROC_CURVE_WIDTH
)[0]
axes_object.set_xlabel('POFD (probability of false detection)')
axes_object.set_ylabel('POD (probability of detection)')
axes_object.set_xlim(0., 1.)
axes_object.set_ylim(0., 1.)
return line_handle
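# Illustrative usage sketch (assumption, not part of the original module):
# plotting a ROC curve from synthetic POD and POFD values.
def _demo_plot_roc_curve():
    """Plots a ROC curve for synthetic forecasts."""
    pofd_by_threshold = numpy.linspace(1., 0., num=11)
    pod_by_threshold = pofd_by_threshold ** 0.5   # curve above the diagonal

    _, axes_object = pyplot.subplots(1, 1, figsize=(10, 10))
    plot_roc_curve(
        axes_object=axes_object, pod_by_threshold=pod_by_threshold,
        pofd_by_threshold=pofd_by_threshold)
    pyplot.show()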
def plot_bootstrapped_roc_curve(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
line_colour=ROC_CURVE_COLOUR, plot_background=True):
"""Bootstrapped version of plot_roc_curve.
T = number of probability thresholds in curve
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param ci_bottom_dict: Dictionary with the following keys, representing the
bottom of the confidence interval.
ci_bottom_dict['pod_by_threshold']: length-T numpy array of POD values
(probability of detection).
ci_bottom_dict['pofd_by_threshold']: length-T numpy array of POFD values
(probability of false detection).
:param ci_mean_dict: Same but for mean of confidence interval.
:param ci_top_dict: Same but for top of confidence interval.
:param line_colour: See doc for `plot_roc_curve`.
:param plot_background: Same.
:return: line_handle: Same.
"""
line_handle = plot_roc_curve(
axes_object=axes_object,
pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],
pofd_by_threshold=ci_mean_dict[model_eval.POFD_BY_THRESHOLD_KEY],
line_colour=line_colour, plot_background=plot_background
)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.POFD_BY_THRESHOLD_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],
x_coords_top=ci_top_dict[model_eval.POFD_BY_THRESHOLD_KEY],
y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY]
)
if polygon_object is None:
return line_handle
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(line_colour),
POLYGON_OPACITY
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch)
return line_handle
def plot_performance_diagram(
axes_object, pod_by_threshold, success_ratio_by_threshold,
line_colour=PERF_DIAGRAM_COLOUR, plot_background=True):
"""Plots performance diagram.
T = number of binarization thresholds
For the definition of a "binarization threshold" and the role they play in
performance diagrams, see
`model_evaluation.get_points_in_performance_diagram`.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
:param success_ratio_by_threshold: length-T numpy array of success ratios.
:param line_colour: Line colour.
:param plot_background: Boolean flag. If True, will plot background
(frequency-bias and CSI contours).
    :return: line_handle: Line handle for performance-diagram curve.
"""
error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
pod_by_threshold, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
pod_by_threshold, 1., allow_nan=True)
num_thresholds = len(pod_by_threshold)
expected_dim = numpy.array([num_thresholds], dtype=int)
error_checking.assert_is_numpy_array(
success_ratio_by_threshold, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(
success_ratio_by_threshold, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
success_ratio_by_threshold, 1., allow_nan=True)
error_checking.assert_is_boolean(plot_background)
if plot_background:
success_ratio_matrix, pod_matrix = model_eval.get_sr_pod_grid()
csi_matrix = model_eval.csi_from_sr_and_pod(
success_ratio_array=success_ratio_matrix, pod_array=pod_matrix
)
frequency_bias_matrix = model_eval.frequency_bias_from_sr_and_pod(
success_ratio_array=success_ratio_matrix, pod_array=pod_matrix
)
this_colour_map_object, this_colour_norm_object = (
_get_csi_colour_scheme()
)
pyplot.contourf(
success_ratio_matrix, pod_matrix, csi_matrix, CSI_LEVELS,
cmap=this_colour_map_object, norm=this_colour_norm_object, vmin=0.,
vmax=1., axes=axes_object)
colour_bar_object = plotting_utils.plot_colour_bar(
axes_object_or_matrix=axes_object, data_matrix=csi_matrix,
colour_map_object=this_colour_map_object,
colour_norm_object=this_colour_norm_object,
orientation_string='vertical', extend_min=False, extend_max=False,
fraction_of_axis_length=0.8)
colour_bar_object.set_label('CSI (critical success index)')
bias_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
FREQ_BIAS_COLOUR)
bias_colours_2d_tuple = ()
for _ in range(len(FREQ_BIAS_LEVELS)):
bias_colours_2d_tuple += (bias_colour_tuple,)
bias_contour_object = pyplot.contour(
success_ratio_matrix, pod_matrix, frequency_bias_matrix,
FREQ_BIAS_LEVELS, colors=bias_colours_2d_tuple,
linewidths=FREQ_BIAS_WIDTH, linestyles='dashed', axes=axes_object)
pyplot.clabel(
bias_contour_object, inline=True, inline_spacing=FREQ_BIAS_PADDING,
fmt=FREQ_BIAS_STRING_FORMAT, fontsize=FONT_SIZE)
nan_flags = numpy.logical_or(
numpy.isnan(success_ratio_by_threshold), numpy.isnan(pod_by_threshold)
)
if numpy.all(nan_flags):
line_handle = None
else:
real_indices = numpy.where(numpy.invert(nan_flags))[0]
line_handle = axes_object.plot(
success_ratio_by_threshold[real_indices],
pod_by_threshold[real_indices],
color=plotting_utils.colour_from_numpy_to_tuple(line_colour),
linestyle='solid', linewidth=PERF_DIAGRAM_WIDTH
)[0]
axes_object.set_xlabel('Success ratio (1 - FAR)')
axes_object.set_ylabel('POD (probability of detection)')
axes_object.set_xlim(0., 1.)
axes_object.set_ylim(0., 1.)
return line_handle
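# Illustrative usage sketch (assumption): plotting a performance diagram from
# synthetic POD and success-ratio values.
def _demo_plot_performance_diagram():
    """Plots a performance diagram for synthetic forecasts."""
    success_ratio_by_threshold = numpy.linspace(0., 1., num=11)
    pod_by_threshold = numpy.linspace(1., 0., num=11) ** 2

    _, axes_object = pyplot.subplots(1, 1, figsize=(10, 10))
    plot_performance_diagram(
        axes_object=axes_object, pod_by_threshold=pod_by_threshold,
        success_ratio_by_threshold=success_ratio_by_threshold)
    pyplot.show()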
def plot_bootstrapped_performance_diagram(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
line_colour=PERF_DIAGRAM_COLOUR, plot_background=True):
"""Bootstrapped version of plot_performance_diagram.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param ci_bottom_dict: Dictionary with the following keys,
representing the bottom of the confidence interval.
ci_bottom_dict['pod_by_threshold']: length-T numpy array of POD
values (probability of detection).
ci_bottom_dict['success_ratio_by_threshold']: length-T numpy array of
success ratios.
:param ci_mean_dict: Same but for mean of confidence interval.
:param ci_top_dict: Same but for top of confidence interval.
:param line_colour: See doc for `plot_performance_diagram`.
:param plot_background: Same.
    :return: line_handle: Same.
"""
line_handle = plot_performance_diagram(
axes_object=axes_object,
pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],
success_ratio_by_threshold=ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY],
line_colour=line_colour, plot_background=plot_background
)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.SR_BY_THRESHOLD_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],
x_coords_top=ci_top_dict[model_eval.SR_BY_THRESHOLD_KEY],
y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY],
for_performance_diagram=True)
if polygon_object is None:
return line_handle
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(line_colour),
POLYGON_OPACITY
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch)
return line_handle
def plot_reliability_curve(
axes_object, mean_forecast_by_bin, event_frequency_by_bin):
"""Plots reliability curve.
B = number of bins (separated by forecast probability)
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param mean_forecast_by_bin: length-B numpy array of mean forecast
probabilities.
:param event_frequency_by_bin: length-B numpy array of mean observed
labels (conditional event frequencies).
"""
error_checking.assert_is_numpy_array(
mean_forecast_by_bin, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
mean_forecast_by_bin, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
mean_forecast_by_bin, 1., allow_nan=True)
num_bins = len(mean_forecast_by_bin)
expected_dim = numpy.array([num_bins], dtype=int)
error_checking.assert_is_numpy_array(
event_frequency_by_bin, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(
event_frequency_by_bin, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
event_frequency_by_bin, 1., allow_nan=True)
perfect_x_coords, perfect_y_coords = (
model_eval.get_perfect_reliability_curve()
)
axes_object.plot(
perfect_x_coords, perfect_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(PERFECT_RELIA_COLOUR),
linestyle='dashed', linewidth=PERFECT_RELIA_WIDTH
)
nan_flags = numpy.logical_or(
numpy.isnan(mean_forecast_by_bin), numpy.isnan(event_frequency_by_bin)
)
if not numpy.all(nan_flags):
real_indices = numpy.where(numpy.invert(nan_flags))[0]
axes_object.plot(
mean_forecast_by_bin[real_indices],
event_frequency_by_bin[real_indices],
color=plotting_utils.colour_from_numpy_to_tuple(RELIABILITY_COLOUR),
linestyle='solid', linewidth=RELIABILITY_WIDTH
)
axes_object.set_xlabel('Forecast probability')
axes_object.set_ylabel('Conditional event frequency')
axes_object.set_xlim(0., 1.)
axes_object.set_ylim(0., 1.)
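# Illustrative usage sketch (assumption): plotting a reliability curve for a
# slightly overconfident synthetic model.
def _demo_plot_reliability_curve():
    """Plots a reliability curve for synthetic forecasts."""
    mean_forecast_by_bin = numpy.linspace(0.05, 0.95, num=10)
    event_frequency_by_bin = numpy.clip(
        0.5 + 0.8 * (mean_forecast_by_bin - 0.5), 0., 1.)

    _, axes_object = pyplot.subplots(1, 1, figsize=(10, 10))
    plot_reliability_curve(
        axes_object=axes_object, mean_forecast_by_bin=mean_forecast_by_bin,
        event_frequency_by_bin=event_frequency_by_bin)
    pyplot.show()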
def plot_bootstrapped_reliability_curve(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict):
"""Bootstrapped version of plot_reliability_curve.
B = number of bins (separated by forecast probability)
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param ci_bottom_dict: Dictionary with the following keys,
representing the bottom of the confidence interval.
ci_bottom_dict['mean_forecast_by_bin']: length-B numpy array of mean
forecast probabilities.
ci_bottom_dict['event_frequency_by_bin']: length-B numpy array of
conditional event frequencies.
:param ci_mean_dict: Same but for mean of confidence interval.
:param ci_top_dict: Same but for top of confidence interval.
"""
plot_reliability_curve(
axes_object=axes_object,
mean_forecast_by_bin=ci_mean_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]
)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]
)
if polygon_object is None:
return
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(RELIABILITY_COLOUR),
POLYGON_OPACITY
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch)
def plot_attributes_diagram(
figure_object, axes_object, mean_forecast_by_bin,
event_frequency_by_bin, num_examples_by_bin):
"""Plots attributes diagram (Hsu and Murphy 1986).
:param figure_object: Instance of `matplotlib.figure.Figure`.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param mean_forecast_by_bin: See doc for `plot_reliability_curve`.
:param event_frequency_by_bin: Same.
:param num_examples_by_bin: See doc for
`_plot_inset_histogram_for_attributes_diagram`.
"""
error_checking.assert_is_numpy_array(
event_frequency_by_bin, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
event_frequency_by_bin, 0., allow_nan=True)
error_checking.assert_is_leq_numpy_array(
event_frequency_by_bin, 1., allow_nan=True)
num_bins = len(mean_forecast_by_bin)
expected_dim = numpy.array([num_bins], dtype=int)
error_checking.assert_is_integer_numpy_array(num_examples_by_bin)
error_checking.assert_is_numpy_array(
num_examples_by_bin, exact_dimensions=expected_dim)
error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)
non_empty_bin_indices = numpy.where(num_examples_by_bin > 0)[0]
error_checking.assert_is_numpy_array_without_nan(
event_frequency_by_bin[non_empty_bin_indices]
)
climatology = numpy.average(
event_frequency_by_bin[non_empty_bin_indices],
weights=num_examples_by_bin[non_empty_bin_indices]
)
_plot_background_of_attributes_diagram(
axes_object=axes_object, climatology=climatology)
_plot_inset_histogram_for_attributes_diagram(
figure_object=figure_object, num_examples_by_bin=num_examples_by_bin)
plot_reliability_curve(
axes_object=axes_object, mean_forecast_by_bin=mean_forecast_by_bin,
event_frequency_by_bin=event_frequency_by_bin)
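# Illustrative usage sketch (assumption): plotting an attributes diagram with
# synthetic bin statistics.  The bin counts below are made up for demonstration.
def _demo_plot_attributes_diagram():
    """Plots an attributes diagram for synthetic forecasts."""
    mean_forecast_by_bin = numpy.linspace(0.05, 0.95, num=10)
    event_frequency_by_bin = numpy.clip(
        0.5 + 0.8 * (mean_forecast_by_bin - 0.5), 0., 1.)
    num_examples_by_bin = numpy.array(
        [500, 300, 200, 120, 80, 60, 40, 25, 15, 10], dtype=int)

    figure_object, axes_object = pyplot.subplots(1, 1, figsize=(10, 10))
    plot_attributes_diagram(
        figure_object=figure_object, axes_object=axes_object,
        mean_forecast_by_bin=mean_forecast_by_bin,
        event_frequency_by_bin=event_frequency_by_bin,
        num_examples_by_bin=num_examples_by_bin)
    pyplot.show()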
def plot_bootstrapped_attributes_diagram(
figure_object, axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
num_examples_by_bin):
"""Bootstrapped version of plot_attributes_diagram.
:param figure_object: Instance of `matplotlib.figure.Figure`.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param ci_bottom_dict: See doc for `plot_bootstrapped_reliability_curve`.
:param ci_mean_dict: Same.
:param ci_top_dict: Same.
:param num_examples_by_bin: See doc for `plot_attributes_diagram`.
"""
plot_attributes_diagram(
figure_object=figure_object, axes_object=axes_object,
mean_forecast_by_bin=ci_mean_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
num_examples_by_bin=num_examples_by_bin
)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]
)
if polygon_object is None:
return
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(RELIABILITY_COLOUR),
POLYGON_OPACITY
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch)
| mit |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/artist.py | 10 | 47874 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import OrderedDict, namedtuple
import re
import warnings
import inspect
import numpy as np
import matplotlib
import matplotlib.cbook as cbook
from matplotlib.cbook import mplDeprecation
from matplotlib import docstring, rcParams
from .transforms import (Bbox, IdentityTransform, TransformedBbox,
TransformedPath, Transform)
from .path import Path
# Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
#
# https://mail.python.org/pipermail/python-list/2004-October/242925.html
def allow_rasterization(draw):
"""
Decorator for Artist.draw method. Provides routines
that run before and after the draw call. The before and after functions
are useful for changing artist-dependant renderer attributes or making
other setup function calls, such as starting and flushing a mixed-mode
renderer.
"""
def before(artist, renderer):
if artist.get_rasterized():
renderer.start_rasterizing()
if artist.get_agg_filter() is not None:
renderer.start_filter()
def after(artist, renderer):
if artist.get_agg_filter() is not None:
renderer.stop_filter(artist.get_agg_filter())
if artist.get_rasterized():
renderer.stop_rasterizing()
# the axes class has a second argument inframe for its draw method.
def draw_wrapper(artist, renderer, *args, **kwargs):
before(artist, renderer)
draw(artist, renderer, *args, **kwargs)
after(artist, renderer)
# "safe wrapping" to exactly replicate anything we haven't overridden above
draw_wrapper.__name__ = draw.__name__
draw_wrapper.__dict__ = draw.__dict__
draw_wrapper.__doc__ = draw.__doc__
draw_wrapper._supports_rasterization = True
return draw_wrapper
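# Illustrative sketch (assumption, not part of matplotlib itself): a toy
# Artist subclass whose draw method is wrapped with `allow_rasterization`, so
# that `set_rasterized(True)` and agg filters are honoured by mixed-mode
# renderers.  The class is created lazily because `Artist` is defined below.
def _demo_allow_rasterization():
    """Returns a minimal Artist subclass with a rasterizable draw method."""
    class _DemoArtist(Artist):
        @allow_rasterization
        def draw(self, renderer, *args, **kwargs):
            if not self.get_visible():
                return
            # Real drawing calls on `renderer` would go here.
            self.stale = False

    return _DemoArtist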
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val
_XYPair = namedtuple("_XYPair", "x y")
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
# order of precedence when bulk setting/updating properties
# via update. The keys should be property names and the values
# integers
_prop_order = dict(color=-1)
def __init__(self):
self._stale = True
self.stale_callback = None
self._axes = None
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = None
self.clipbox = None
self._clippath = None
self._clipon = True
self._label = ''
self._picker = None
self._contains = None
self._rasterized = None
self._agg_filter = None
self._mouseover = False
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
try:
self.axes = None
except AttributeError:
# Handle self.axes as a read-only property, as in Figure.
pass
self._remove_method = None
self._url = None
self._gid = None
self._snap = None
self._sketch = rcParams['path.sketch']
self._path_effects = rcParams['path.effects']
self._sticky_edges = _XYPair([], [])
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
d['stale_callback'] = None
return d
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should
# set the _remove_method attribute directly. This would be a
# protected attribute if Python supported that sort of thing. The
# callback has one parameter, which is the child to be removed.
if self._remove_method is not None:
self._remove_method(self)
# clear stale callback
self.stale_callback = None
_ax_flag = False
if hasattr(self, 'axes') and self.axes:
# remove from the mouse hit list
self.axes.mouseover_set.discard(self)
# mark the axes as stale
self.axes.stale = True
# decouple the artist from the axes
self.axes = None
_ax_flag = True
if self.figure:
self.figure = None
if not _ax_flag:
self.figure = True
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None:
return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
warnings.warn(_get_axes_msg.format('set_axes'), mplDeprecation,
stacklevel=1)
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
"""
warnings.warn(_get_axes_msg.format('get_axes'), mplDeprecation,
stacklevel=1)
return self.axes
@property
def axes(self):
"""
The :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
"""
return self._axes
@axes.setter
def axes(self, new_axes):
if (new_axes is not None and
(self._axes is not None and new_axes != self._axes)):
raise ValueError("Can not reset the axes. You are "
"probably trying to re-use an artist "
"in more than one Axes which is not "
"supported")
self._axes = new_axes
if new_axes is not None and new_axes is not self:
self.stale_callback = _stale_axes_callback
return new_axes
@property
def stale(self):
"""
If the artist is 'stale' and needs to be re-drawn for the output to
match the internal state of the artist.
"""
return self._stale
@stale.setter
def stale(self, val):
self._stale = val
# if the artist is animated it does not take normal part in the
# draw stack and is not expected to be drawn as part of the normal
# draw loop (when not saving) so do not propagate this change
if self.get_animated():
return
if val and self.stale_callback is not None:
self.stale_callback(self, val)
def get_window_extent(self, renderer):
"""
Get the axes bounding box in display space.
Subclasses should override for inclusion in the bounding box
"tight" calculation. Default is to return an empty bounding
box at 0, 0.
Be careful when using this function, the results will not update
        if the window extent of the artist changes.  The extent
can change due to any changes in the transform stack, such as
changing the axes limits, the figure size, or the canvas used
(as is done when saving a figure). This can lead to unexpected
behavior where interactive figures will look fine on the screen,
but will save incorrectly.
"""
return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in six.iteritems(self._propobservers):
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
elif (not isinstance(self._transform, Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
L = []
try:
hascursor, info = self.contains(event)
if hascursor:
L.append(self)
except:
import traceback
traceback.print_exc()
print("while checking", self.__class__)
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False, {}
def set_contains(self, picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
Process pick event
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if six.callable(picker):
inside, prop = picker(self, mouseevent)
else:
inside, prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
# make sure the event happened in the same axes
ax = getattr(a, 'axes', None)
if mouseevent.inaxes is None or ax is None or \
mouseevent.inaxes == ax:
# we need to check if mouseevent.inaxes is None
# because some objects associated with an axes (e.g., a
# tick label) can be outside the bounding box of the
# axes and inaxes will be None
# also check that ax is None so that it traverse objects
# which do no have an axes property but children might
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
            off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g., the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
            to determine the hit test.  If the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
ACCEPTS: a url string
"""
self._url = url
def get_gid(self):
"""
Returns the group id
"""
return self._gid
def set_gid(self, gid):
"""
Sets the (group) id for the artist
ACCEPTS: an id string
"""
self._gid = gid
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
if rcParams['path.snap']:
return self._snap
else:
return False
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
self._snap = snap
self.stale = True
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
self.stale = True
def set_path_effects(self, path_effects):
"""
set path_effects, which should be a list of instances of
matplotlib.patheffect._Base class or its derivatives.
"""
self._path_effects = path_effects
self.stale = True
def get_path_effects(self):
return self._path_effects
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
# if this is a no-op just return
if self.figure is fig:
return
# if we currently have a figure (the case of both `self.figure`
        # and `fig` being none is taken care of above) then the user is
        # trying to change the figure an artist is associated with, which
# is not allowed for the same reason as adding the same instance
# to more than one Axes
if self.figure is not None:
raise RuntimeError("Can not put single artist in "
"more than one figure")
self.figure = fig
if self.figure and self.figure is not self:
self.pchanged()
self.stale = True
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
self.stale = True
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from matplotlib.patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(),
path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
elif isinstance(path, tuple):
path, transform = path
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
elif isinstance(path, TransformedPath):
self._clippath = path
success = True
if not success:
print(type(path), type(transform))
raise TypeError("Invalid arguments to set_clip_path")
        # this may result in the callbacks being hit twice, but guarantees they
# will be hit at least once
self.pchanged()
self.stale = True
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
        When False, artists will be visible outside of the axes, which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
self._clipon = b
# This may result in the callbacks being hit twice, but ensures they
# are hit at least once
self.pchanged()
self.stale = True
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
"return True if the artist is to be rasterized"
return self._rasterized
def set_rasterized(self, rasterized):
"""
Force rasterized (bitmap) drawing in vector backend output.
Defaults to None, which implies the backend's default behavior
ACCEPTS: [True | False | None]
"""
if rasterized and not hasattr(self.draw, "_supports_rasterization"):
warnings.warn("Rasterization of '%s' will be ignored" % self)
self._rasterized = rasterized
def get_agg_filter(self):
"return filter function to be used for agg filter"
return self._agg_filter
def set_agg_filter(self, filter_func):
"""
        Set the agg_filter function.
"""
self._agg_filter = filter_func
self.stale = True
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible():
return
self.stale = False
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends.
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
self.stale = True
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
self.stale = True
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
if self._animated != b:
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
        dictionary *props*.
"""
def _update_property(self, k, v):
"""sorting out how to update property (setter or setattr)
Parameters
----------
k : str
The name of property to update
v : obj
The value to assign to the property
Returns
-------
ret : obj or None
If using a `set_*` method return it's return, else None.
"""
k = k.lower()
# white list attributes we want to be able to update through
# art.update, art.set, setp
if k in {'axes'}:
return setattr(self, k, v)
else:
func = getattr(self, 'set_' + k, None)
if func is None or not six.callable(func):
raise AttributeError('Unknown property %s' % k)
return func(v)
store = self.eventson
self.eventson = False
try:
ret = [_update_property(self, k, v)
for k, v in props.items()]
finally:
self.eventson = store
if len(ret):
self.pchanged()
self.stale = True
return ret
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
self.stale = True
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
self.stale = True
@property
def sticky_edges(self):
"""
`x` and `y` sticky edge lists.
When performing autoscaling, if a data limit coincides with a value in
the corresponding sticky_edges list, then no margin will be added--the
view limit "sticks" to the edge. A typical usecase is histograms,
where one usually expects no margin on the bottom edge (0) of the
histogram.
This attribute cannot be assigned to; however, the `x` and `y` lists
can be modified in place as needed.
Examples
--------
>>> artist.sticky_edges.x[:] = (xmin, xmax)
>>> artist.sticky_edges.y[:] = (ymin, ymax)
"""
return self._sticky_edges
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._label = other._label
self._sketch = other._sketch
self._path_effects = other._path_effects
self.sticky_edges.x[:] = other.sticky_edges.x[:]
self.sticky_edges.y[:] = other.sticky_edges.y[:]
self.pchanged()
self.stale = True
def properties(self):
"""
return a dictionary mapping property name -> value for all Artist props
"""
return ArtistInspector(self).properties()
def set(self, **kwargs):
"""A property batch setter. Pass *kwargs* to set properties.
"""
props = OrderedDict(
sorted(kwargs.items(), reverse=True,
key=lambda x: (self._prop_order.get(x[0], 0), x[0])))
return self.update(props)
def findobj(self, match=None, include_self=True):
"""
Find artist objects.
Recursively find all :class:`~matplotlib.artist.Artist` instances
contained in self.
*match* can be
- None: return all objects contained in artist.
- function with signature ``boolean = match(artist)``
used to filter matches
- class instance: e.g., Line2D. Only return artists of class type.
If *include_self* is True (default), include self in the list to be
checked for a match.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in
c.findobj(matchfunc, include_self=False)
if matchfunc(thisc)])
if include_self and matchfunc(self):
artists.append(self)
return artists
def get_cursor_data(self, event):
"""
Get the cursor data for a given event.
"""
return None
def format_cursor_data(self, data):
"""
Return *cursor data* string formatted.
"""
try:
data[0]
except (TypeError, IndexError):
data = [data]
return ', '.join('{:0.3g}'.format(item) for item in data if
isinstance(item, (np.floating, np.integer, int, float)))
@property
def mouseover(self):
return self._mouseover
@mouseover.setter
def mouseover(self, val):
val = bool(val)
self._mouseover = val
ax = self.axes
if ax:
if val:
ax.mouseover_set.add(self)
else:
ax.mouseover_set.discard(self)
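# Illustrative usage sketch (assumption, not part of matplotlib itself):
# using `Artist.findobj` to collect every Line2D in an axes' artist tree
# (this includes tick lines as well as plotted lines).
def _demo_findobj():
    """Returns all Line2D artists found in a freshly created plot."""
    from matplotlib import pyplot as plt
    from matplotlib.lines import Line2D

    _, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], label='demo line')
    return ax.findobj(match=Line2D)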
class ArtistInspector(object):
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of :class:`Artists`.
If a sequence is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o) and len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and six.callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
e.g., for a line linestyle, return
"[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
]"
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None:
return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return re.sub("\n *", " ", match.group(1))
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
o = getattr(self.o, name)
if not six.callable(o):
continue
if six.PY2:
nargs = len(inspect.getargspec(o)[0])
else:
nargs = len(inspect.getfullargspec(o)[0])
if nargs < 2:
continue
func = o
if self.is_alias(func):
continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. e.g., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None:
return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x
for x in sorted(self.aliasd[s])])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x
for x in sorted(self.aliasd[s])])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target)
for prop, target
in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len + 3) +
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def properties(self):
"""
return a dictionary mapping property name -> value
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and six.callable(getattr(o, name))]
getters.sort()
d = dict()
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
val = func()
except:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
d = self.properties()
names = list(six.iterkeys(d))
names.sort()
lines = []
for name in names:
val = d[name]
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(' %s = %s' % (name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: e.g., :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
            matchfunc = match
else:
raise ValueError('match must be None, an '
'matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc
for thisc
in c.findobj(matchfunc)
if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(obj, property=None):
"""
Return the value of object's property. *property* is an optional string
for the property you want to return
Example usage::
getp(obj) # get all the object properties
getp(obj, 'linestyle') # get the linestyle property
*obj* is a :class:`Artist` instance, e.g.,
    :class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
obj.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(obj)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
if property is None:
insp = ArtistInspector(obj)
ret = insp.pprint_getters()
print('\n'.join(ret))
return
func = getattr(obj, 'get_' + property)
return func()
# alias
get = getp
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. e.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(obj)
if len(kwargs) == 0 and len(args) == 0:
print('\n'.join(insp.pprint_setters()))
return
if len(kwargs) == 0 and len(args) == 1:
print(insp.pprint_setters(prop=args[0]))
return
if not cbook.iterable(obj):
objs = [obj]
else:
objs = list(cbook.flatten(obj))
if len(args) % 2:
raise ValueError('The set args must be string, value pairs')
# put args into ordereddict to maintain order
funcvals = OrderedDict()
for i in range(0, len(args) - 1, 2):
funcvals[args[i]] = args[i + 1]
ret = [o.update(funcvals) for o in objs]
ret.extend([o.set(**kwargs) for o in objs])
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(
leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
docstring.interpd.update(Artist=kwdoc(Artist))
_get_axes_msg = """{0} has been deprecated in mpl 1.5, please use the
axes property. A removal date has not been set."""
| apache-2.0 |
fredhusser/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
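# A minimal sketch of the typical use of the approximate feature map (not
# part of the original test module): transform the data with RBFSampler and
# train an ordinary linear classifier on the result.
def _rbf_sampler_usage_sketch():
    from sklearn.linear_model import SGDClassifier
    from sklearn.pipeline import make_pipeline
    rng_demo = np.random.RandomState(0)
    X_demo = rng_demo.random_sample(size=(200, 5))
    y_demo = (X_demo.sum(axis=1) > 2.5).astype(int)
    clf = make_pipeline(RBFSampler(gamma=1., n_components=100, random_state=0),
                        SGDClassifier(random_state=0))
    clf.fit(X_demo, y_demo)
    return clf.score(X_demo, y_demo)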
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
Jozhogg/iris | lib/iris/plot.py | 1 | 36785 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Iris-specific extensions to matplotlib, mimicking the :mod:`matplotlib.pyplot`
interface.
See also: :ref:`matplotlib <matplotlib:users-guide-index>`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import map, zip
import collections
import datetime
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
import matplotlib.axes
import matplotlib.collections as mpl_collections
import matplotlib.dates as mpl_dates
import matplotlib.transforms as mpl_transforms
import matplotlib.pyplot as plt
import matplotlib.ticker as mpl_ticker
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
import numpy as np
import numpy.ma as ma
import iris.cube
import iris.analysis.cartography as cartography
import iris.coords
# Importing iris.palette to register the brewer palettes.
import iris.palette
# Cynthia Brewer citation text.
BREWER_CITE = 'Colours based on ColorBrewer.org'
PlotDefn = collections.namedtuple('PlotDefn', ('coords', 'transpose'))
def _get_plot_defn_custom_coords_picked(cube, coords, mode, ndims=2):
def as_coord(coord):
coord = cube.coord(coord)
return coord
coords = list(map(as_coord, coords))
# Check that we were given the right number of coordinates
if len(coords) != ndims:
coord_names = ', '.join([coord.name() for coord in coords])
raise ValueError('The list of coordinates given (%s) should have the'
' same length (%s) as the dimensionality of the'
' required plot (%s)' % (coord_names,
len(coords), ndims))
# Check which dimensions are spanned by each coordinate.
get_span = lambda coord: set(cube.coord_dims(coord))
spans = list(map(get_span, coords))
for span, coord in zip(spans, coords):
if not span:
msg = 'The coordinate {!r} doesn\'t span a data dimension.'
raise ValueError(msg.format(coord.name()))
if mode == iris.coords.BOUND_MODE and len(span) != 1:
raise ValueError('The coordinate {!r} is multi-dimensional and'
' cannot be used in a cell-based plot.'
.format(coord.name()))
# Check the combination of coordinates spans enough (ndims) data
# dimensions.
total_span = set().union(*spans)
if len(total_span) != ndims:
coord_names = ', '.join([coord.name() for coord in coords])
raise ValueError('The given coordinates ({}) don\'t span the {} data'
' dimensions.'.format(coord_names, ndims))
# If we have 2-dimensional data, and one or more 1-dimensional
# coordinates, check if we need to transpose.
transpose = False
if ndims == 2 and min(map(len, spans)) == 1:
for i, span in enumerate(spans):
if len(span) == 1:
if list(span)[0] == i:
transpose = True
break
# Note the use of `reversed` to convert from the X-then-Y
# convention of the end-user API to the V-then-U convention used by
# the plotting routines.
plot_coords = list(reversed(coords))
return PlotDefn(plot_coords, transpose)
def _valid_bound_coord(coord):
result = None
if coord and coord.ndim == 1 and coord.nbounds:
result = coord
return result
def _get_plot_defn(cube, mode, ndims=2):
"""
Return data and plot-axis coords given a cube & a mode of either
POINT_MODE or BOUND_MODE.
"""
if cube.ndim != ndims:
msg = 'Cube must be %s-dimensional. Got %s dimensions.'
raise ValueError(msg % (ndims, cube.ndim))
# Start by taking the DimCoords from each dimension.
coords = [None] * ndims
for dim_coord in cube.dim_coords:
dim = cube.coord_dims(dim_coord)[0]
coords[dim] = dim_coord
# When appropriate, restrict to 1D with bounds.
if mode == iris.coords.BOUND_MODE:
coords = list(map(_valid_bound_coord, coords))
def guess_axis(coord):
axis = None
if coord is not None:
axis = iris.util.guess_coord_axis(coord)
return axis
# Allow DimCoords in aux_coords to fill in for missing dim_coords.
for dim, coord in enumerate(coords):
if coord is None:
aux_coords = cube.coords(dimensions=dim)
aux_coords = [coord for coord in aux_coords
if isinstance(coord, iris.coords.DimCoord)]
if aux_coords:
key_func = lambda coord: coord._as_defn()
aux_coords.sort(key=key_func)
coords[dim] = aux_coords[0]
if mode == iris.coords.POINT_MODE:
# Allow multi-dimensional aux_coords to override the dim_coords
# along the Z axis. This results in a preference for using the
# derived altitude over model_level_number or level_height.
# Limit to Z axis to avoid preferring latitude over grid_latitude etc.
axes = list(map(guess_axis, coords))
axis = 'Z'
if axis in axes:
for coord in cube.coords(dim_coords=False):
if max(coord.shape) > 1 and \
iris.util.guess_coord_axis(coord) == axis:
coords[axes.index(axis)] = coord
# Re-order the coordinates to achieve the preferred
# horizontal/vertical associations.
def sort_key(coord):
order = {'X': 2, 'T': 1, 'Y': -1, 'Z': -2}
axis = guess_axis(coord)
return (order.get(axis, 0), coord and coord.name())
sorted_coords = sorted(coords, key=sort_key)
transpose = (sorted_coords != coords)
return PlotDefn(sorted_coords, transpose)
def _can_draw_map(plot_coords):
std_names = [coord and coord.standard_name for coord in plot_coords]
valid_std_names = [
['latitude', 'longitude'],
['grid_latitude', 'grid_longitude'],
['projection_y_coordinate', 'projection_x_coordinate']
]
return std_names in valid_std_names
def _broadcast_2d(u, v):
# Matplotlib needs the U and V coordinates to have the same
# dimensionality (either both 1D, or both 2D). So we simply
# broadcast both to 2D to be on the safe side.
u = np.atleast_2d(u)
v = np.atleast_2d(v.T).T
u, v = np.broadcast_arrays(u, v)
return u, v
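# A small sketch of the broadcasting above (not part of the original
# module): a length-3 x array and a length-2 y array become matching (2, 3)
# grids, which is the shape the 2D draw methods expect.
def _broadcast_2d_example():
    u, v = _broadcast_2d(np.arange(3), np.arange(2))
    return u.shape, v.shape   # both are (2, 3)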
def _string_coord_axis_tick_labels(string_axes):
"""Apply tick labels for string coordinates."""
ax = plt.gca()
for axis, ticks in string_axes.items():
formatter = mpl_ticker.IndexFormatter(ticks)
locator = mpl_ticker.MaxNLocator(integer=True)
this_axis = getattr(ax, axis)
this_axis.set_major_formatter(formatter)
this_axis.set_major_locator(locator)
def _invert_yaxis(v_coord):
"""
Inverts the y-axis of the current plot based on conditions:
* If the y-axis is already inverted we don't want to re-invert it.
* If v_coord is None then it will not have any attributes.
* If neither of the above are true then invert y if v_coord has
attribute 'positive' set to 'down'.
Args:
* v_coord - the coord to be plotted on the y-axis
"""
yaxis_is_inverted = plt.gca().yaxis_inverted()
if not yaxis_is_inverted and v_coord is not None:
attr_pve = v_coord.attributes.get('positive')
if attr_pve is not None and attr_pve.lower() == 'down':
plt.gca().invert_yaxis()
def _draw_2d_from_bounds(draw_method_name, cube, *args, **kwargs):
# NB. In the interests of clarity we use "u" and "v" to refer to the
# horizontal and vertical axes on the matplotlib plot.
mode = iris.coords.BOUND_MODE
# Get & remove the coords entry from kwargs.
coords = kwargs.pop('coords', None)
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(cube, coords, mode)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
if _can_draw_map(plot_defn.coords):
result = _map_common(draw_method_name, None, iris.coords.BOUND_MODE,
cube, plot_defn, *args, **kwargs)
else:
# Obtain data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# Obtain U and V coordinates
v_coord, u_coord = plot_defn.coords
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for coord, axis_name, data_dim in zip([u_coord, v_coord],
['xaxis', 'yaxis'],
[1, 0]):
if coord:
if coord.points.dtype.char == 'S':
if coord.points.ndim != 1:
msg = 'Coord {!r} must be one-dimensional.'
raise ValueError(msg.format(coord))
if coord.bounds is not None:
msg = 'Cannot plot bounded string coordinate.'
raise ValueError(msg)
string_axes[axis_name] = coord.points
values = np.arange(data.shape[data_dim] + 1) - 0.5
else:
values = coord.contiguous_bounds()
else:
values = np.arange(data.shape[data_dim] + 1)
plot_arrays.append(values)
u, v = plot_arrays
u, v = _broadcast_2d(u, v)
draw_method = getattr(plt, draw_method_name)
result = draw_method(u, v, data, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes)
# Invert y-axis if necessary.
_invert_yaxis(v_coord)
return result
def _draw_2d_from_points(draw_method_name, arg_func, cube, *args, **kwargs):
# NB. In the interests of clarity we use "u" and "v" to refer to the
# horizontal and vertical axes on the matplotlib plot.
mode = iris.coords.POINT_MODE
# Get & remove the coords entry from kwargs.
coords = kwargs.pop('coords', None)
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(cube, coords, mode)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
if _can_draw_map(plot_defn.coords):
result = _map_common(draw_method_name, arg_func,
iris.coords.POINT_MODE, cube, plot_defn,
*args, **kwargs)
else:
# Obtain data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# Obtain U and V coordinates
v_coord, u_coord = plot_defn.coords
if u_coord:
u = u_coord.points
u = _fixup_dates(u_coord, u)
else:
u = np.arange(data.shape[1])
if v_coord:
v = v_coord.points
v = _fixup_dates(v_coord, v)
else:
v = np.arange(data.shape[0])
if plot_defn.transpose:
u = u.T
v = v.T
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for values, axis_name in zip([u, v], ['xaxis', 'yaxis']):
# Replace any string coordinates with "index" coordinates.
if values.dtype.char == 'S':
if values.ndim != 1:
raise ValueError('Multi-dimensional string coordinates '
'not supported.')
plot_arrays.append(np.arange(values.size))
string_axes[axis_name] = values
elif (values.dtype == np.dtype(object) and
isinstance(values[0], datetime.datetime)):
plot_arrays.append(mpl_dates.date2num(values))
else:
plot_arrays.append(values)
u, v = plot_arrays
u, v = _broadcast_2d(u, v)
draw_method = getattr(plt, draw_method_name)
if arg_func is not None:
args, kwargs = arg_func(u, v, data, *args, **kwargs)
result = draw_method(*args, **kwargs)
else:
result = draw_method(u, v, data, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes)
# Invert y-axis if necessary.
_invert_yaxis(v_coord)
return result
def _fixup_dates(coord, values):
if coord.units.calendar is not None and values.ndim == 1:
r = [datetime.datetime(*(coord.units.num2date(val).timetuple()[0:6]))
for val in values]
values = np.empty(len(r), dtype=object)
values[:] = r
return values
def _data_from_coord_or_cube(c):
if isinstance(c, iris.cube.Cube):
data = c.data
elif isinstance(c, iris.coords.Coord):
data = _fixup_dates(c, c.points)
else:
raise TypeError('Plot arguments must be cubes or coordinates.')
return data
def _uv_from_u_object_v_object(u_object, v_object):
ndim_msg = 'Cube or coordinate must be 1-dimensional. Got {} dimensions.'
if u_object is not None and u_object.ndim > 1:
raise ValueError(ndim_msg.format(u_object.ndim))
if v_object.ndim > 1:
raise ValueError(ndim_msg.format(v_object.ndim))
v = _data_from_coord_or_cube(v_object)
if u_object is None:
u = np.arange(v.shape[0])
else:
u = _data_from_coord_or_cube(u_object)
return u, v
def _u_object_from_v_object(v_object):
u_object = None
if isinstance(v_object, iris.cube.Cube):
plot_defn = _get_plot_defn(v_object, iris.coords.POINT_MODE, ndims=1)
u_object, = plot_defn.coords
return u_object
def _get_plot_objects(args):
if len(args) > 1 and isinstance(args[1],
(iris.cube.Cube, iris.coords.Coord)):
# two arguments
u_object, v_object = args[:2]
u, v = _uv_from_u_object_v_object(*args[:2])
args = args[2:]
if len(u) != len(v):
msg = "The x and y-axis objects are not compatible. They should " \
"have equal sizes but got ({}: {}) and ({}: {})."
raise ValueError(msg.format(u_object.name(), len(u),
v_object.name(), len(v)))
else:
# single argument
v_object = args[0]
u_object = _u_object_from_v_object(v_object)
u, v = _uv_from_u_object_v_object(u_object, args[0])
args = args[1:]
return u_object, v_object, u, v, args
def _draw_1d_from_points(draw_method_name, arg_func, *args, **kwargs):
# NB. In the interests of clarity we use "u" to refer to the horizontal
# axes on the matplotlib plot and "v" for the vertical axes.
# retrieve the objects that are plotted on the horizontal and vertical
# axes (cubes or coordinates) and their respective values, along with the
# argument tuple with these objects removed
u_object, v_object, u, v, args = _get_plot_objects(args)
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for values, axis_name in zip([u, v], ['xaxis', 'yaxis']):
# Replace any string coordinates with "index" coordinates.
if values.dtype.char == 'S':
if values.ndim != 1:
msg = 'Multi-dimensional string coordinates are not supported.'
raise ValueError(msg)
plot_arrays.append(np.arange(values.size))
string_axes[axis_name] = values
else:
plot_arrays.append(values)
u, v = plot_arrays
# if both u_object and v_object are coordinates then check if a map
# should be drawn
if isinstance(u_object, iris.coords.Coord) and \
isinstance(v_object, iris.coords.Coord) and \
_can_draw_map([v_object, u_object]):
# Replace non-cartopy subplot/axes with a cartopy alternative and set
# the transform keyword.
kwargs = _ensure_cartopy_axes_and_determine_kwargs(u_object, v_object,
kwargs)
draw_method = getattr(plt, draw_method_name)
if arg_func is not None:
args, kwargs = arg_func(u, v, *args, **kwargs)
result = draw_method(*args, **kwargs)
else:
result = draw_method(u, v, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes)
# Invert y-axis if necessary.
_invert_yaxis(v_object)
return result
def _replace_axes_with_cartopy_axes(cartopy_proj):
"""
Replace non-cartopy subplot/axes with a cartopy alternative
based on the provided projection. If the current axes are already an
instance of :class:`cartopy.mpl.geoaxes.GeoAxes` then no action is taken.
"""
ax = plt.gca()
if not isinstance(ax,
cartopy.mpl.geoaxes.GeoAxes):
fig = plt.gcf()
if isinstance(ax, matplotlib.axes.SubplotBase):
new_ax = fig.add_subplot(ax.get_subplotspec(),
projection=cartopy_proj,
title=ax.get_title(),
xlabel=ax.get_xlabel(),
ylabel=ax.get_ylabel())
else:
new_ax = fig.add_axes(projection=cartopy_proj,
title=ax.get_title(),
xlabel=ax.get_xlabel(),
ylabel=ax.get_ylabel())
# delete the axes which didn't have a cartopy projection
fig.delaxes(ax)
def _ensure_cartopy_axes_and_determine_kwargs(x_coord, y_coord, kwargs):
"""
Replace the current non-cartopy axes with :class:`cartopy.mpl.GeoAxes`
and return the appropriate kwargs dict based on the provided coordinates
and kwargs.
"""
# Determine projection.
if x_coord.coord_system != y_coord.coord_system:
raise ValueError('The X and Y coordinates must have equal coordinate'
' systems.')
cs = x_coord.coord_system
if cs is not None:
cartopy_proj = cs.as_cartopy_projection()
else:
cartopy_proj = ccrs.PlateCarree()
# Ensure the current axes are a cartopy.mpl.GeoAxes instance.
_replace_axes_with_cartopy_axes(cartopy_proj)
# Set the "from transform" keyword.
if 'transform' in kwargs:
        raise ValueError("The 'transform' keyword is not allowed as it "
                         "is automatically determined from the coordinate "
"metadata.")
new_kwargs = kwargs.copy()
new_kwargs['transform'] = cartopy_proj
return new_kwargs
def _map_common(draw_method_name, arg_func, mode, cube, plot_defn,
*args, **kwargs):
"""
Draw the given cube on a map using its points or bounds.
"Mode" parameter will switch functionality between POINT or BOUND plotting.
"""
# Generate 2d x and 2d y grids.
y_coord, x_coord = plot_defn.coords
if mode == iris.coords.POINT_MODE:
if x_coord.ndim == y_coord.ndim == 1:
x, y = np.meshgrid(x_coord.points, y_coord.points)
elif x_coord.ndim == y_coord.ndim == 2:
x = x_coord.points
y = y_coord.points
else:
raise ValueError("Expected 1D or 2D XY coords")
else:
try:
x, y = np.meshgrid(x_coord.contiguous_bounds(),
y_coord.contiguous_bounds())
# Exception translation.
except iris.exceptions.CoordinateMultiDimError:
raise ValueError("Could not get XY grid from bounds. "
"X or Y coordinate not 1D.")
except ValueError:
raise ValueError("Could not get XY grid from bounds. "
"X or Y coordinate doesn't have 2 bounds "
"per point.")
# Obtain the data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# If we are global, then append the first column of data the array to the
# last (and add 360 degrees) NOTE: if it is found that this block of code
# is useful in anywhere other than this plotting routine, it may be better
# placed in the CS.
if getattr(x_coord, 'circular', False):
_, direction = iris.util.monotonic(x_coord.points,
return_direction=True)
y = np.append(y, y[:, 0:1], axis=1)
x = np.append(x, x[:, 0:1] + 360 * direction, axis=1)
data = ma.concatenate([data, data[:, 0:1]], axis=1)
# Replace non-cartopy subplot/axes with a cartopy alternative and set the
# transform keyword.
kwargs = _ensure_cartopy_axes_and_determine_kwargs(x_coord, y_coord,
kwargs)
if arg_func is not None:
new_args, kwargs = arg_func(x, y, data, *args, **kwargs)
else:
new_args = (x, y, data) + args
# Draw the contour lines/filled contours.
return getattr(plt, draw_method_name)(*new_args, **kwargs)
def contour(cube, *args, **kwargs):
"""
Draws contour lines based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
See :func:`matplotlib.pyplot.contour` for details of other valid keyword
arguments.
"""
result = _draw_2d_from_points('contour', None, cube, *args, **kwargs)
return result
def contourf(cube, *args, **kwargs):
"""
Draws filled contours based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
See :func:`matplotlib.pyplot.contourf` for details of other valid keyword
arguments.
"""
coords = kwargs.get('coords')
kwargs.setdefault('antialiased', True)
result = _draw_2d_from_points('contourf', None, cube, *args, **kwargs)
# Matplotlib produces visible seams between anti-aliased polygons.
# But if the polygons are virtually opaque then we can cover the seams
# by drawing anti-aliased lines *underneath* the polygon joins.
# Figure out the alpha level for the contour plot
if result.alpha is None:
alpha = result.collections[0].get_facecolor()[0][3]
else:
alpha = result.alpha
# If the contours are anti-aliased and mostly opaque then draw lines under
# the seams.
if result.antialiased and alpha > 0.95:
levels = result.levels
colors = [c[0] for c in result.tcolors]
if result.extend == 'neither':
levels = levels[1:-1]
colors = colors[:-1]
elif result.extend == 'min':
levels = levels[:-1]
colors = colors[:-1]
elif result.extend == 'max':
levels = levels[1:]
colors = colors[:-1]
else:
colors = colors[:-1]
if len(levels) > 0:
# Draw the lines just *below* the polygons to ensure we minimise
# any boundary shift.
zorder = result.collections[0].zorder - .1
contour(cube, levels=levels, colors=colors, antialiased=True,
zorder=zorder, coords=coords)
# Restore the current "image" to 'result' rather than the mappable
# resulting from the additional call to contour().
plt.sci(result)
return result
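# A minimal usage sketch (not part of the original module), assuming the
# cube is 2D with latitude/longitude coordinates so that a cartopy GeoAxes
# is created automatically by the machinery above.
def _contourf_usage_sketch(cube):
    result = contourf(cube, coords=['longitude', 'latitude'])
    plt.gca().coastlines()
    plt.show()
    return result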
def default_projection(cube):
"""
Return the primary map projection for the given cube.
Using the returned projection, one can create a cartopy map with::
import matplotlib.pyplot as plt
        ax = plt.axes(projection=default_projection(cube))
"""
# XXX logic seems flawed, but it is what map_setup did...
cs = cube.coord_system("CoordSystem")
projection = cs.as_cartopy_projection() if cs else None
return projection
def default_projection_extent(cube, mode=iris.coords.POINT_MODE):
"""
Return the cube's extents ``(x0, x1, y0, y1)`` in its default projection.
Keyword arguments:
* mode - Either ``iris.coords.POINT_MODE`` or ``iris.coords.BOUND_MODE``.
Triggers whether the extent should be representative of the cell
points, or the limits of the cell's bounds.
The default is iris.coords.POINT_MODE.
"""
extents = cartography._xy_range(cube, mode)
xlim = extents[0]
ylim = extents[1]
return tuple(xlim) + tuple(ylim)
def _fill_orography(cube, coords, mode, vert_plot, horiz_plot, style_args):
# Find the orography coordinate.
orography = cube.coord('surface_altitude')
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(cube, coords, mode,
ndims=2)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
v_coord, u_coord = plot_defn.coords
# Find which plot coordinate corresponds to the derived altitude, so that
# we can replace altitude with the surface altitude.
if v_coord and v_coord.standard_name == 'altitude':
# v is altitude, so plot u and orography with orog in the y direction.
result = vert_plot(u_coord, orography, style_args)
elif u_coord and u_coord.standard_name == 'altitude':
# u is altitude, so plot v and orography with orog in the x direction.
result = horiz_plot(v_coord, orography, style_args)
else:
raise ValueError('Plot does not use hybrid height. One of the '
'coordinates to plot must be altitude, but %s and %s '
'were given.' % (u_coord.name(), v_coord.name()))
return result
def orography_at_bounds(cube, facecolor='#888888', coords=None):
"""Plots orography defined at cell boundaries from the given Cube."""
# XXX Needs contiguous orography corners to work.
raise NotImplementedError('This operation is temporarily not provided '
'until coordinates can expose 2d contiguous '
'bounds (corners).')
style_args = {'edgecolor': 'none', 'facecolor': facecolor}
def vert_plot(u_coord, orography, style_args):
u = u_coord.contiguous_bounds()
left = u[:-1]
height = orography.points
width = u[1:] - left
return plt.bar(left, height, width, **style_args)
def horiz_plot(v_coord, orography, style_args):
v = v_coord.contiguous_bounds()
bottom = v[:-1]
width = orography.points
height = v[1:] - bottom
return plt.barh(bottom, width, height, **style_args)
return _fill_orography(cube, coords, iris.coords.BOUND_MODE, vert_plot,
horiz_plot, style_args)
def orography_at_points(cube, facecolor='#888888', coords=None):
"""Plots orography defined at sample points from the given Cube."""
style_args = {'facecolor': facecolor}
def vert_plot(u_coord, orography, style_args):
x = u_coord.points
y = orography.points
return plt.fill_between(x, y, **style_args)
def horiz_plot(v_coord, orography, style_args):
y = v_coord.points
x = orography.points
return plt.fill_betweenx(y, x, **style_args)
return _fill_orography(cube, coords, iris.coords.POINT_MODE, vert_plot,
horiz_plot, style_args)
def outline(cube, coords=None):
"""
Draws cell outlines based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
"""
result = _draw_2d_from_bounds('pcolormesh', cube, facecolors='none',
edgecolors='k', antialiased=True,
coords=coords)
# set the _is_stroked property to get a single color grid.
# See https://github.com/matplotlib/matplotlib/issues/1302
result._is_stroked = False
if hasattr(result, '_wrapped_collection_fix'):
result._wrapped_collection_fix._is_stroked = False
return result
def pcolor(cube, *args, **kwargs):
"""
Draws a pseudocolor plot based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
See :func:`matplotlib.pyplot.pcolor` for details of other valid keyword
arguments.
"""
kwargs.setdefault('antialiased', True)
kwargs.setdefault('snap', False)
result = _draw_2d_from_bounds('pcolor', cube, *args, **kwargs)
return result
def pcolormesh(cube, *args, **kwargs):
"""
Draws a pseudocolor plot based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
See :func:`matplotlib.pyplot.pcolormesh` for details of other valid keyword
arguments.
"""
result = _draw_2d_from_bounds('pcolormesh', cube, *args, **kwargs)
return result
def points(cube, *args, **kwargs):
"""
Draws sample point positions based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
See :func:`matplotlib.pyplot.scatter` for details of other valid keyword
arguments.
"""
_scatter_args = lambda u, v, data, *args, **kwargs: ((u, v) + args, kwargs)
return _draw_2d_from_points('scatter', _scatter_args, cube,
*args, **kwargs)
def plot(*args, **kwargs):
"""
Draws a line plot based on the given cube(s) or coordinate(s).
The first one or two arguments may be cubes or coordinates to plot.
Each of the following is valid::
# plot a 1d cube against its dimension coordinate
plot(cube)
# plot a 1d coordinate
plot(coord)
# plot a 1d cube against a given 1d coordinate, with the cube
# values on the y-axis and the coordinate on the x-axis
plot(coord, cube)
# plot a 1d cube against a given 1d coordinate, with the cube
# values on the x-axis and the coordinate on the y-axis
plot(cube, coord)
# plot two 1d coordinates against one-another
plot(coord1, coord2)
# plot two 1d cubes against one-another
plot(cube1, cube2)
See :func:`matplotlib.pyplot.plot` for details of valid keyword
arguments.
"""
if 'coords' in kwargs:
raise TypeError('"coords" is not a valid plot keyword. Coordinates '
'and cubes may be passed as arguments for '
'full control of the plot axes.')
_plot_args = None
return _draw_1d_from_points('plot', _plot_args, *args, **kwargs)
def scatter(x, y, *args, **kwargs):
"""
Draws a scatter plot based on the given cube(s) or coordinate(s).
Args:
* x: :class:`~iris.cube.Cube` or :class:`~iris.coords.Coord`
A cube or a coordinate to plot on the x-axis.
* y: :class:`~iris.cube.Cube` or :class:`~iris.coords.Coord`
A cube or a coordinate to plot on the y-axis.
See :func:`matplotlib.pyplot.scatter` for details of valid keyword
arguments.
"""
# here we are more specific about argument types than generic 1d plotting
if not isinstance(x, (iris.cube.Cube, iris.coords.Coord)):
raise TypeError('x must be a cube or a coordinate.')
if not isinstance(y, (iris.cube.Cube, iris.coords.Coord)):
raise TypeError('y must be a cube or a coordinate.')
args = (x, y) + args
_plot_args = None
return _draw_1d_from_points('scatter', _plot_args, *args, **kwargs)
# Provide convenience show method from pyplot
show = plt.show
def symbols(x, y, symbols, size, axes=None, units='inches'):
"""
Draws fixed-size symbols.
See :mod:`iris.symbols` for available symbols.
Args:
* x: iterable
The x coordinates where the symbols will be plotted.
* y: iterable
The y coordinates where the symbols will be plotted.
* symbols: iterable
The symbols (from :mod:`iris.symbols`) to plot.
* size: float
The symbol size in `units`.
Kwargs:
* axes:
The :class:`matplotlib.axes.Axes` in which the symbols will be added.
Defaults to the current axes.
* units: ['inches', 'points']
The unit for the symbol size.
"""
if axes is None:
axes = plt.gca()
offsets = np.array(list(zip(x, y)))
# XXX "match_original" doesn't work ... so brute-force it instead.
# PatchCollection constructor ignores all non-style keywords when using
# match_original
# See matplotlib.collections.PatchCollection.__init__
# Specifically matplotlib/collections line 1053
# pc = PatchCollection(symbols, offsets=offsets, transOffset=ax.transData,
# match_original=True)
facecolors = [p.get_facecolor() for p in symbols]
edgecolors = [p.get_edgecolor() for p in symbols]
linewidths = [p.get_linewidth() for p in symbols]
pc = mpl_collections.PatchCollection(symbols, offsets=offsets,
transOffset=axes.transData,
facecolors=facecolors,
edgecolors=edgecolors,
linewidths=linewidths)
if units == 'inches':
scale = axes.figure.dpi
elif units == 'points':
scale = axes.figure.dpi / 72.0
else:
raise ValueError("Unrecognised units: '%s'" % units)
pc.set_transform(mpl_transforms.Affine2D().scale(0.5 * size * scale))
axes.add_collection(pc)
axes.autoscale_view()
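# A minimal usage sketch (not part of the original module): draw a
# fixed-size marker at each (x, y) position.  In practice the patches would
# usually come from :mod:`iris.symbols`; plain matplotlib circles stand in
# here so the sketch stays self-contained.
def _symbols_usage_sketch(x_points, y_points):
    from matplotlib.patches import Circle
    syms = [Circle((0, 0), 0.5) for _ in x_points]
    symbols(x_points, y_points, syms, size=0.1, units='inches')
    plt.show()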
def citation(text, figure=None):
"""
Add a text citation to a plot.
Places an anchored text citation in the bottom right
hand corner of the plot.
Args:
* text:
Citation text to be plotted.
Kwargs:
* figure:
Target :class:`matplotlib.figure.Figure` instance. Defaults
to the current figure if none provided.
"""
if text is not None and len(text):
if figure is None:
figure = plt.gcf()
anchor = AnchoredText(text, prop=dict(size=6), frameon=True, loc=4)
anchor.patch.set_boxstyle('round, pad=0, rounding_size=0.2')
figure.gca().add_artist(anchor)
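# A tiny usage sketch (not part of the original module): attach the
# ColorBrewer acknowledgement defined above to the current figure.
def _citation_usage_sketch():
    citation(BREWER_CITE)
    plt.show()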
| lgpl-3.0 |
abimannans/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
IQSS/geoconnect | scripts/tab_read/read_dv02.py | 1 | 4425 | import pandas as pd
import csvkit
from csvkit import sql as csvkit_sql
from csvkit import table
from csv import QUOTE_NONNUMERIC, QUOTE_NONE, QUOTE_MINIMAL
def msg(m): print m
def dashes(): print '-' *40
def msgt(m): dashes(); msg(m); dashes()
t3 = 'test_dv_files/CBG Annual and Longitudinal Measures.tab'
t1 = 'test_dv_files/2-ca-measures.tab'
t2 = 'test_dv_files/boston_income.tab'
t4_out = 'test_dv_files/out_file.tab'
def pandas_format():
# --------------------------------------
msgt('(1) pandas open, make formatted column')
# --------------------------------------
df = pd.read_csv(t3, sep='\t')
print df['BG_ID_10'][:5]
df['format_col'] = df['BG_ID_10'].apply(\
lambda x: "{0}".format(x))
# lambda x: '""{0}""'.format(x))
print df['format_col'][:5], len(df['format_col'][:5][1])
msgt('Columns in dataframe')
print df.columns
print df[:3].to_sql('mytable', )
return
df[:3].to_csv(t4_out,\
sep='\t',\
index=False,\
quoting=QUOTE_NONNUMERIC)
print 'file written', t4_out
def csv_to_table():
# --------------------------------------
msgt('(2) csvkit to table')
# --------------------------------------
fh = open(t4_out, 'rb')
csv_args = dict(delimiter='\t',\
quotechar='"')
print 'QUOTE_NONE', QUOTE_NONE
csv_table = table.Table.from_csv(f=fh,\
name='tname',\
snifflimit=None,\
#quoting=QUOTE_NONNUMERIC,\
# **csv_args\
)
for col in csv_table:
msg('%s, %s' % (col.name, col.type))
sql_table = csvkit_sql.make_table(csv_table, 'new_table')
create_table_sql = csvkit_sql.make_create_table_statement(sql_table, dialect="postgresql")
msg('create_table_sql: %s' % create_table_sql)
msg(csv_table.to_rows())
def csv_to_table2():
# --------------------------------------
msgt('(3) csvkit to table reformat')
# --------------------------------------
fh = open(t4_out, 'rb')
csv_args = dict(delimiter='\t',\
quotechar='"')
csv_table = table.Table.from_csv(f=fh,\
name='tname',\
snifflimit=None,\
)
print [c.name for c in csv_table]
last_col = csv_table[-1]
last_col.type = unicode
for idx, val in enumerate(last_col):
last_col[idx] = '%s' % val
#last_col = ['%s' % x for x in last_col]
#print last_col[0]
msg(csv_table.to_rows())
print [ '%s, %s' % (c.name, c.type) for c in csv_table]
return
print 'last_col', last_col.order
col_num = len(csv_table)
print 'col_num', col_num
quoted_data = [u'"%s"' % val for val in last_col]
print 'quoted_data', quoted_data
#return
new_column = table.Column(order=last_col.order,\
name=last_col.name,\
l=quoted_data,\
#normal_type=None,\
)
#normal_type=None)
csv_table.pop(-1)
csv_table.append(new_column)
sql_table = csvkit_sql.make_table(csv_table, 'new_table')
create_table_sql = csvkit_sql.make_create_table_statement(sql_table, dialect="postgresql")
msg('create_table_sql: %s' % create_table_sql)
msg(csv_table.to_rows())
return
msgt('new_column')
msg(new_column)
print new_column.name
for val in new_column: print val
#print len(new_column)
"""
print csv_table.columns
for col in csv_table:
msg('%s, %s' % (col.name, col.type))
sql_table = csvkit_sql.make_table(csv_table, 'new_table')
create_table_sql = csvkit_sql.make_create_table_statement(sql_table, dialect="postgresql")
msg('create_table_sql: %s' % create_table_sql)
msg(csv_table.to_rows())
"""
def try_detect_disable():
pandas_format()
#csv_to_table()
#csv_to_table2()
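# A small sketch of the zero padding used in the commented-out snippet in
# the __main__ block below (not part of the original script): the format
# spec '0>6' pads with zeros to width 6, preserving leading zeros in
# tract/CBG identifiers.
def zero_pad_example():
    return '{0:0>6}'.format(82)   # -> '000082'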
if __name__ == '__main__':
try_detect_disable()
#df = pd.read_csv(t2, sep='\t')
#print df.columns
# Add zero padded column
#
#df['TRACT-formatted']= df['TRACT'].apply(lambda x: '{0:0>6}'.format(x))
#df.to_csv('fname', '\t')
# Format Attempts
"""
nope (1) single quote in front
(2) double quotes
(3) double quotes disable sniffer
"""
| apache-2.0 |
nguyentu1602/statsmodels | statsmodels/stats/power.py | 31 | 47523 | # -*- coding: utf-8 -*-
#pylint: disable-msg=W0142
"""Statistical power, solving for nobs, ... - trial version
Created on Sat Jan 12 21:48:06 2013
Author: Josef Perktold
Example
roundtrip - root with respect to all variables
calculated, desired
nobs 33.367204205 33.367204205
effect 0.5 0.5
alpha 0.05 0.05
power 0.8 0.8
TODO:
refactoring
- rename beta -> power, beta (type 2 error is beta = 1-power) DONE
- I think the current implementation can handle any kinds of extra keywords
  (except for maybe raising meaningful exceptions)
- streamline code, I think internally classes can be merged
how to extend to k-sample tests?
user interface for different tests that map to the same (internal) test class
- sequence of arguments might be inconsistent,
arg and/or kwds so python checks what's required and what can be None.
- templating for docstrings ?
"""
from __future__ import print_function
from statsmodels.compat.python import iteritems
import numpy as np
from scipy import stats, optimize
from statsmodels.tools.rootfinding import brentq_expanding
def ttest_power(effect_size, nobs, alpha, df=None, alternative='two-sided'):
    '''Calculate the power of a t-test.
'''
d = effect_size
if df is None:
df = nobs - 1
if alternative in ['two-sided', '2s']:
alpha_ = alpha / 2. #no inplace changes, doesn't work
elif alternative in ['smaller', 'larger']:
alpha_ = alpha
else:
raise ValueError("alternative has to be 'two-sided', 'larger' " +
"or 'smaller'")
pow_ = 0
if alternative in ['two-sided', '2s', 'larger']:
crit_upp = stats.t.isf(alpha_, df)
#print crit_upp, df, d*np.sqrt(nobs)
# use private methods, generic methods return nan with negative d
if np.any(np.isnan(crit_upp)):
# avoid endless loop, https://github.com/scipy/scipy/issues/2667
pow_ = np.nan
else:
pow_ = stats.nct._sf(crit_upp, df, d*np.sqrt(nobs))
if alternative in ['two-sided', '2s', 'smaller']:
crit_low = stats.t.ppf(alpha_, df)
#print crit_low, df, d*np.sqrt(nobs)
if np.any(np.isnan(crit_low)):
pow_ = np.nan
else:
pow_ += stats.nct._cdf(crit_low, df, d*np.sqrt(nobs))
return pow_
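# A small sketch (not part of the original module): for the same effect size
# the one-sided test is more powerful than the two-sided test, because the
# two-sided version splits alpha across both tails as handled above.
def _ttest_power_example():
    pow_two_sided = ttest_power(0.5, nobs=30, alpha=0.05,
                                alternative='two-sided')
    pow_one_sided = ttest_power(0.5, nobs=30, alpha=0.05,
                                alternative='larger')
    return pow_two_sided, pow_one_sided   # roughly 0.75 and 0.85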
def normal_power(effect_size, nobs, alpha, alternative='two-sided', sigma=1.):
    '''Calculate the power of a normally distributed test statistic.
'''
d = effect_size
if alternative in ['two-sided', '2s']:
alpha_ = alpha / 2. #no inplace changes, doesn't work
elif alternative in ['smaller', 'larger']:
alpha_ = alpha
else:
raise ValueError("alternative has to be 'two-sided', 'larger' " +
"or 'smaller'")
pow_ = 0
if alternative in ['two-sided', '2s', 'larger']:
crit = stats.norm.isf(alpha_)
pow_ = stats.norm.sf(crit - d*np.sqrt(nobs)/sigma)
if alternative in ['two-sided', '2s', 'smaller']:
crit = stats.norm.ppf(alpha_)
pow_ += stats.norm.cdf(crit - d*np.sqrt(nobs)/sigma)
return pow_
def ftest_anova_power(effect_size, nobs, alpha, k_groups=2, df=None):
    '''Power of the F-test for one-way ANOVA with k equal-sized groups.
nobs total sample size, sum over all groups
should be general nobs observations, k_groups restrictions ???
'''
df_num = nobs - k_groups
df_denom = k_groups - 1
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, effect_size**2 * nobs)
return pow_#, crit
def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1):
'''Calculate the power of a F-test.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
df_num : int or float
numerator degrees of freedom.
df_denom : int or float
denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ncc : int
degrees of freedom correction for non-centrality parameter.
see Notes
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Notes
-----
sample size is given implicitly by df_num
set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test
ftest_power with ncc=0 should also be correct for f_test in regression
models, with df_num and d_denom as defined there. (not verified yet)
'''
nc = effect_size**2 * (df_denom + df_num + ncc)
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, nc)
return pow_ #, crit, nc
#class based implementation
#--------------------------
class Power(object):
'''Statistical Power calculations, Base Class
so far this could all be class methods
'''
def __init__(self, **kwds):
self.__dict__.update(kwds)
# used only for instance level start values
self.start_ttp = dict(effect_size=0.01, nobs=10., alpha=0.15,
power=0.6, nobs1=10., ratio=1,
df_num=10, df_denom=3 # for FTestPower
)
# TODO: nobs1 and ratio are for ttest_ind,
# need start_ttp for each test/class separately,
# possible rootfinding problem for effect_size, starting small seems to
# work
from collections import defaultdict
self.start_bqexp = defaultdict(dict)
for key in ['nobs', 'nobs1', 'df_num', 'df_denom']:
self.start_bqexp[key] = dict(low=2., start_upp=50.)
for key in ['df_denom']:
self.start_bqexp[key] = dict(low=1., start_upp=50.)
for key in ['ratio']:
self.start_bqexp[key] = dict(low=1e-8, start_upp=2)
for key in ['alpha']:
self.start_bqexp[key] = dict(low=1e-12, upp=1 - 1e-12)
def power(self, *args, **kwds):
raise NotImplementedError
def _power_identity(self, *args, **kwds):
power_ = kwds.pop('power')
return self.power(*args, **kwds) - power_
def solve_power(self, **kwds):
'''solve for any one of the parameters of a t-test
for t-test the keywords are:
effect_size, nobs, alpha, power
exactly one needs to be ``None``, all others need numeric values
*attaches*
cache_fit_res : list
Cache of the result of the root finding procedure for the latest
call to ``solve_power``, mainly for debugging purposes.
The first element is the success indicator, one if successful.
The remaining elements contain the return information of the up to
three solvers that have been tried.
'''
#TODO: maybe use explicit kwds,
# nicer but requires inspect? and not generic across tests
# I'm duplicating this in the subclass to get informative docstring
key = [k for k,v in iteritems(kwds) if v is None]
#print kwds, key;
if len(key) != 1:
raise ValueError('need exactly one keyword that is None')
key = key[0]
if key == 'power':
del kwds['power']
return self.power(**kwds)
self._counter = 0
def func(x):
kwds[key] = x
fval = self._power_identity(**kwds)
self._counter += 1
#print self._counter,
if self._counter > 500:
raise RuntimeError('possible endless loop (500 NaNs)')
if np.isnan(fval):
return np.inf
else:
return fval
#TODO: I'm using the following so I get a warning when start_ttp is not defined
try:
start_value = self.start_ttp[key]
except KeyError:
start_value = 0.9
print('Warning: using default start_value for {0}'.format(key))
fit_kwds = self.start_bqexp[key]
fit_res = []
#print vars()
try:
val, res = brentq_expanding(func, full_output=True, **fit_kwds)
failed = False
fit_res.append(res)
except ValueError:
failed = True
fit_res.append(None)
success = None
if (not failed) and res.converged:
success = 1
else:
# try backup
#TODO: check more cases to make this robust
val, infodict, ier, msg = optimize.fsolve(func, start_value,
full_output=True) #scalar
#val = optimize.newton(func, start_value) #scalar
fval = infodict['fvec']
fit_res.append(infodict)
if ier == 1 and np.abs(fval) < 1e-4 :
success = 1
else:
#print infodict
if key in ['alpha', 'power', 'effect_size']:
val, r = optimize.brentq(func, 1e-8, 1-1e-8,
full_output=True) #scalar
success = 1 if r.converged else 0
fit_res.append(r)
else:
success = 0
if not success == 1:
import warnings
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
convergence_doc)
warnings.warn(convergence_doc, ConvergenceWarning)
#attach fit_res, for reading only, should be needed only for debugging
fit_res.insert(0, success)
self.cache_fit_res = fit_res
return val
def plot_power(self, dep_var='nobs', nobs=None, effect_size=None,
alpha=0.05, ax=None, title=None, plt_kwds=None, **kwds):
'''plot power with number of observations or effect size on x-axis
Parameters
----------
dep_var : string in ['nobs', 'effect_size', 'alpha']
This specifies which variable is used for the horizontal axis.
If dep_var='nobs' (default), then one curve is created for each
value of ``effect_size``. If dep_var='effect_size' or alpha, then
one curve is created for each value of ``nobs``.
nobs : scalar or array_like
specifies the values of the number of observations in the plot
effect_size : scalar or array_like
specifies the values of the effect_size in the plot
alpha : float or array_like
The significance level (type I error) used in the power
calculation. Can only be more than a scalar, if ``dep_var='alpha'``
ax : None or axis instance
If ax is None, than a matplotlib figure is created. If ax is a
matplotlib axis instance, then it is reused, and the plot elements
are created with it.
title : string
title for the axis. Use an empty string, ``''``, to avoid a title.
plt_kwds : None or dict
not used yet
kwds : optional keywords for power function
These remaining keyword arguments are used as arguments to the
power function. Many power function support ``alternative`` as a
keyword argument, two-sample test support ``ratio``.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This works only for classes where the ``power`` method has
``effect_size``, ``nobs`` and ``alpha`` as the first three arguments.
If the second argument is ``nobs1``, then the number of observations
in the plot are those for the first sample.
TODO: fix this for FTestPower and GofChisquarePower
TODO: maybe add line variable, if we want more than nobs and effectsize
'''
#if pwr_kwds is None:
# pwr_kwds = {}
from statsmodels.graphics import utils
from statsmodels.graphics.plottools import rainbow
fig, ax = utils.create_mpl_ax(ax)
import matplotlib.pyplot as plt
colormap = plt.cm.Dark2 #pylint: disable-msg=E1101
plt_alpha = 1 #0.75
lw = 2
if dep_var == 'nobs':
colors = rainbow(len(effect_size))
colors = [colormap(i) for i in np.linspace(0, 0.9, len(effect_size))]
for ii, es in enumerate(effect_size):
power = self.power(es, nobs, alpha, **kwds)
ax.plot(nobs, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='es=%4.2F' % es)
xlabel = 'Number of Observations'
elif dep_var in ['effect size', 'effect_size', 'es']:
colors = rainbow(len(nobs))
colors = [colormap(i) for i in np.linspace(0, 0.9, len(nobs))]
for ii, n in enumerate(nobs):
power = self.power(effect_size, n, alpha, **kwds)
ax.plot(effect_size, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='N=%4.2F' % n)
xlabel = 'Effect Size'
elif dep_var in ['alpha']:
# experimental nobs as defining separate lines
colors = rainbow(len(nobs))
for ii, n in enumerate(nobs):
power = self.power(effect_size, n, alpha, **kwds)
ax.plot(alpha, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='N=%4.2F' % n)
xlabel = 'alpha'
else:
raise ValueError('depvar not implemented')
if title is None:
title = 'Power of Test'
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.legend(loc='lower right')
return fig
class TTestPower(Power):
'''Statistical Power calculations for one sample or paired sample t-test
'''
def power(self, effect_size, nobs, alpha, df=None, alternative='two-sided'):
'''Calculate the power of a t-test for one sample or paired samples.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
df : int or float
degrees of freedom. By default this is None, and the df from the
one sample or paired ttest is used, ``df = nobs1 - 1``
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
# for debugging
#print 'calling ttest power with', (effect_size, nobs, alpha, df, alternative)
return ttest_power(effect_size, nobs, alpha, df=df,
alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
alternative='two-sided'):
'''solve for any one parameter of the power of a one sample t-test
for the one sample t-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
This test can also be used for a paired t-test, where effect size is
defined in terms of the mean difference, and nobs is the number of
pairs.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
*attaches*
cache_fit_res : list
Cache of the result of the root finding procedure for the latest
call to ``solve_power``, mainly for debugging purposes.
The first element is the success indicator, one if successful.
The remaining elements contain the return information of the up to
three solvers that have been tried.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
# for debugging
#print 'calling ttest solve with', (effect_size, nobs, alpha, power, alternative)
return super(TTestPower, self).solve_power(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
power=power,
alternative=alternative)
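# Editor's sketch of typical use (approximate value, not part of the original file):
# solving for the sample size that detects d=0.5 with 80% power at two-sided
# alpha=0.05 in a one-sample t-test.
#
#     >>> TTestPower().solve_power(effect_size=0.5, alpha=0.05, power=0.8)  # doctest: +SKIP
#     33.36...    # i.e. about 34 observations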
class TTestIndPower(Power):
'''Statistical Power calculations for t-test for two independent sample
currently only uses pooled variance
'''
def power(self, effect_size, nobs1, alpha, ratio=1, df=None,
alternative='two-sided'):
'''Calculate the power of a t-test for two independent sample
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. `effect_size` has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments, it has to be explicitly set to None.
df : int or float
degrees of freedom. By default this is None, and the df from the
ttest with pooled variance is used, ``df = (nobs1 - 1 + nobs2 - 1)``
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
nobs2 = nobs1*ratio
#pooled variance
if df is None:
df = (nobs1 - 1 + nobs2 - 1)
nobs = 1./ (1. / nobs1 + 1. / nobs2)
#print 'calling ttest power with', (effect_size, nobs, alpha, df, alternative)
return ttest_power(effect_size, nobs, alpha, df=df, alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample t-test
for t-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. `effect_size` has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(TTestIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
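# Editor's sketch (approximate value, not part of the original file): the classic
# two-sample calculation, d=0.5 with 80% power at two-sided alpha=0.05 and equal
# group sizes, needs roughly 64 observations per group.
#
#     >>> TTestIndPower().solve_power(effect_size=0.5, alpha=0.05, power=0.8, ratio=1.)  # doctest: +SKIP
#     63.7...     # nobs1; nobs2 = nobs1 * ratio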
class NormalIndPower(Power):
'''Statistical Power calculations for z-test for two independent samples.
currently only uses pooled variance
'''
def __init__(self, ddof=0, **kwds):
self.ddof = ddof
super(NormalIndPower, self).__init__(**kwds)
def power(self, effect_size, nobs1, alpha, ratio=1,
alternative='two-sided'):
'''Calculate the power of a t-test for two independent sample
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. effect size has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
``ratio`` can be set to zero in order to get the power for a
one sample test.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
ddof = self.ddof # for correlation, ddof=3
# get effective nobs, factor for std of test statistic
if ratio > 0:
nobs2 = nobs1*ratio
#equivalent to nobs = n1*n2/(n1+n2)=n1*ratio/(1+ratio)
nobs = 1./ (1. / (nobs1 - ddof) + 1. / (nobs2 - ddof))
else:
nobs = nobs1 - ddof
return normal_power(effect_size, nobs, alpha, alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample z-test
for z-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation.
If ratio=0, then this is the standardized mean in the one sample
test.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
``ratio`` can be set to zero in order to get the power for a
one sample test.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(NormalIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
class FTestPower(Power):
'''Statistical Power calculations for generic F-test
'''
def power(self, effect_size, df_num, df_denom, alpha, ncc=1):
'''Calculate the power of a F-test.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
df_num : int or float
numerator degrees of freedom.
df_denom : int or float
denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ncc : int
degrees of freedom correction for non-centrality parameter.
see Notes
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Notes
-----
sample size is given implicitly by df_num
set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test
ftest_power with ncc=0 should also be correct for f_test in regression
        models, with df_num and df_denom as defined there. (not verified yet)
'''
pow_ = ftest_power(effect_size, df_num, df_denom, alpha, ncc=ncc)
#print effect_size, df_num, df_denom, alpha, pow_
return pow_
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, df_num=None, df_denom=None,
nobs=None, alpha=None, power=None, ncc=1):
'''solve for any one parameter of the power of a F-test
for the one sample F-test the keywords are:
effect_size, df_num, df_denom, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(FTestPower, self).solve_power(effect_size=effect_size,
df_num=df_num,
df_denom=df_denom,
alpha=alpha,
power=power,
ncc=ncc)
class FTestAnovaPower(Power):
'''Statistical Power calculations F-test for one factor balanced ANOVA
'''
def power(self, effect_size, nobs, alpha, k_groups=2):
'''Calculate the power of a F-test for one factor ANOVA.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
k_groups : int or float
number of groups in the ANOVA or k-sample comparison. Default is 2.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
return ftest_anova_power(effect_size, nobs, alpha, k_groups=k_groups)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
k_groups=2):
'''solve for any one parameter of the power of a F-test
for the one sample F-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
# update start values for root finding
        if k_groups is not None:
self.start_ttp['nobs'] = k_groups * 10
self.start_bqexp['nobs'] = dict(low=k_groups * 2,
start_upp=k_groups * 10)
# first attempt at special casing
if effect_size is None:
return self._solve_effect_size(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
return super(FTestAnovaPower, self).solve_power(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
def _solve_effect_size(self, effect_size=None, nobs=None, alpha=None,
power=None, k_groups=2):
'''experimental, test failure in solve_power for effect_size
'''
def func(x):
effect_size = x
return self._power_identity(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
val, r = optimize.brentq(func, 1e-8, 1-1e-8, full_output=True)
if not r.converged:
print(r)
return val
class GofChisquarePower(Power):
'''Statistical Power calculations for one sample chisquare test
'''
def power(self, effect_size, nobs, alpha, n_bins, ddof=0):
#alternative='two-sided'):
'''Calculate the power of a chisquare test for one sample
Only two-sided alternative is implemented
Parameters
----------
effect_size : float
standardized effect size, according to Cohen's definition.
see :func:`statsmodels.stats.gof.chisquare_effectsize`
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
n_bins : int
number of bins or cells in the distribution.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
from statsmodels.stats.gof import chisquare_power
return chisquare_power(effect_size, nobs, n_bins, alpha, ddof=0)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None,
power=None, n_bins=2):
'''solve for any one parameter of the power of a one sample chisquare-test
for the one sample chisquare-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
n_bins needs to be defined, a default=2 is used.
Parameters
----------
effect_size : float
standardized effect size, according to Cohen's definition.
see :func:`statsmodels.stats.gof.chisquare_effectsize`
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
n_bins : int
number of bins or cells in the distribution
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(GofChisquarePower, self).solve_power(effect_size=effect_size,
nobs=nobs,
n_bins=n_bins,
alpha=alpha,
power=power)
class _GofChisquareIndPower(Power):
'''Statistical Power calculations for chisquare goodness-of-fit test
TODO: this is not working yet
for 2sample case need two nobs in function
no one-sided chisquare test, is there one? use normal distribution?
-> drop one-sided options?
'''
def power(self, effect_size, nobs1, alpha, ratio=1,
alternative='two-sided'):
'''Calculate the power of a chisquare for two independent sample
Parameters
----------
effect_size : float
            standardized effect size, difference between the two means divided
by the standard deviation. effect size has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
            arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
from statsmodels.stats.gof import chisquare_power
nobs2 = nobs1*ratio
#equivalent to nobs = n1*n2/(n1+n2)=n1*ratio/(1+ratio)
nobs = 1./ (1. / nobs1 + 1. / nobs2)
return chisquare_power(effect_size, nobs, alpha)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample z-test
for z-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
            standardized effect size, difference between the two means divided
by the standard deviation.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
            arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(_GofChisquareIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
#shortcut functions
tt_solve_power = TTestPower().solve_power
tt_ind_solve_power = TTestIndPower().solve_power
zt_ind_solve_power = NormalIndPower().solve_power
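# Editor's note: the module-level aliases above are the usual entry points, e.g.
# (illustrative, approximate value)
#
#     >>> tt_ind_solve_power(effect_size=0.3, alpha=0.05, power=0.8)  # doctest: +SKIP
#     # -> roughly 175 observations needed in each group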
| bsd-3-clause |
fastai/fastai | fastai/data/transforms.py | 1 | 14935 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_data.transforms.ipynb (unless otherwise specified).
__all__ = ['get_files', 'FileGetter', 'image_extensions', 'get_image_files', 'ImageGetter', 'get_text_files',
'ItemGetter', 'AttrGetter', 'RandomSplitter', 'TrainTestSplitter', 'IndexSplitter', 'GrandparentSplitter',
'FuncSplitter', 'MaskSplitter', 'FileSplitter', 'ColSplitter', 'RandomSubsetSplitter', 'parent_label',
'RegexLabeller', 'ColReader', 'CategoryMap', 'Categorize', 'Category', 'MultiCategorize', 'MultiCategory',
'OneHotEncode', 'EncodedMultiCategorize', 'RegressionSetup', 'get_c', 'ToTensor', 'IntToFloatTensor',
'broadcast_vec', 'Normalize']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from sklearn.model_selection import train_test_split
import posixpath
# Cell
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# Cell
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
# Cell
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
# Cell
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
# Cell
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
# Cell
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified"
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
# Cell
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
# Cell
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
# Cell
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
# Cell
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(list(torch.randperm(len(o)).numpy()))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
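# Editor's note (illustrative, hypothetical names): a splitter is just a callable
# mapping items to a (train_idxs, valid_idxs) pair, e.g.
#
#     splitter = RandomSplitter(valid_pct=0.2, seed=42)
#     train_idx, valid_idx = splitter(fnames)   # fnames e.g. from get_image_files(path)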
# Cell
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,
stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
# Cell
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
# Cell
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# Cell
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
# Cell
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
# Cell
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
# Cell
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read_text().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
# Cell
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
# Cell
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
"Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(list(torch.randperm(len(o)).numpy()))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
# Cell
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
# Cell
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
o = str(o).replace(os.sep, posixpath.sep)
res = self.matcher(o)
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
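# Editor's illustration (hypothetical path): with the common "label is the parent
# folder" layout, a pattern that captures that folder name would be used like
#
#     RegexLabeller(r'/([^/]+)/[^/]+$')('data/train/cat/001.jpg')   # -> 'cat'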
# Cell
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' or c=='cat' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
# Cell
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
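# Editor's illustration (hypothetical labels): the vocab is the sorted unique
# values and `o2i` is its inverse lookup,
#
#     cm = CategoryMap(['dog', 'cat', 'dog', 'fish'])
#     cm.items                      # L of ['cat', 'dog', 'fish']
#     cm.o2i['dog']                 # 1
#     cm.map_objs(['fish', 'cat'])  # [2, 0]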
# Cell
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o):
try:
return TensorCategory(self.vocab.o2i[o])
except KeyError as e:
raise KeyError(f"Label '{o}' was not included in the training dataset") from e
def decodes(self, o): return Category (self.vocab [o])
# Cell
class Category(str, ShowTitle): _show_args = {'label': 'category'}
# Cell
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o):
if not all(elem in self.vocab.o2i.keys() for elem in o):
diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
diff_str = "', '".join(diff)
raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
# Cell
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
# Cell
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
# Cell
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab, sort=vocab==None)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
# Cell
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
# Cell
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
# Cell
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
# Cell
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
# Cell
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# Cell
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch") | apache-2.0 |
castagnini/swim | record.py | 1 | 2788 | from scipy.optimize import leastsq
from scipy import *
import commands
from numpy import *
from timings import *
from strCheck import *
from matplotlib import *
from pylab import *
import matplotlib.dates as mdates
import sys
def fitfunc(p, x):
    return p[2] + abs(p[0]) / (abs(p[1]) + x)
def errfunc(p, x, y):
    return y - fitfunc(p, x)
def lsqfit(xdata, ydata, pinit):
    # this makes sure we use the 'array' class and not the 'list' class
arrxdata = array(xdata[1:])
arrydata = array(ydata[1:])
# calls least squares
fit_out = leastsq(errfunc, pinit, args=(arrxdata,arrydata), full_output=1, maxfev=100000)
pfinal = fit_out[0]
return pfinal
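# Editor's note (addition): fitfunc above models the record progression as an
# offset hyperbola, t(x) = C + |A| / (|B| + x), so the personal best decays toward
# the asymptote C as the number of days x grows; lsqfit just wraps scipy's leastsq
# around it (skipping the first data point). A hedged synthetic check, kept as a
# comment so the script's command-line flow below is untouched:
#
#     xs = list(range(0, 200, 10))
#     ys = [70.0 + 50.0 / (5.0 + x) for x in xs]
#     A, B, C = lsqfit(xs, ys, [1., 0., 60.])   # should recover ~(50, 5, 70) up to sign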
status,output= commands.getstatusoutput("ls Activities/*_*m.txt")
fnames = output.split('\n')
fn_dates = []
for fn in fnames:
fn_dates.append(getDate(fn))
# sort activities based on dates
fn_dates, fnames = (list(t) for t in zip(*sorted(zip(fn_dates, fnames))))
wanted_style = int (sys.argv[1]) #1 # 0 = "Freestyle"
wanted_length = int (sys.argv[2]) #100
if sys.argv[3] == "fit":
do_fit = True
else:
do_fit = False
current = 99999.0
records=[]
dates = []
for fn in fnames:
f = open(fn)
date = getDate(fn)
best_of_day = 99999.0
for line in f:
# we want the interval 3 not the single splits 3.1, 3.2, etc
if (checkIsInterval(line) == False):
continue
style = getStyle(line)
# no drills, no Mixed,
if (style != wanted_style):
continue
line = strIntCorrection(line)
tokens = line.split(',')
meters = int(tokens[3])
if (meters != wanted_length):
continue
seconds = getSeconds(tokens[4])
if seconds < best_of_day :
best_of_day = seconds
f.close()
# if there was no 100m freestyle
if best_of_day > 99998.0:
continue
if current > best_of_day :
current = best_of_day
records.append(current)
dates.append(date)
print fn
formatter = FuncFormatter(toMMSS)
fig, ax = plt.subplots()
ax.yaxis.set_major_formatter(formatter)
titles = ["Freestyle", "Butterfly", "Breaststroke", "Backstroke"]
title(str(wanted_length) + " m "+ titles[wanted_style] + " record progression")
plot_date(dates, records, label="new records")
fit_days=[]
for T in range(len(dates)):
fit_days.append ( (dates[T] -dates[0]).days )
pinit = [1., 0. ,70.]
if do_fit :
res =lsqfit(fit_days, records, pinit )
plt_days=[]
plt_lim=[]
for T in range(500):
plt_days.append ( ( datetime.timedelta(days=(T+1)) + dates[0]) )
if do_fit:
plt_lim.append(res[2])
if do_fit:
fit_y = fitfunc(res,arange(500))
plot_date(plt_days,fit_y, "-", label=r'$A + \frac{B}{C + t}$')
plot_date(plt_days,plt_lim, "-", label=r'limit $t\rightarrow\infty$')
xlim(dates[0]-datetime.timedelta(days=3),dates[len(dates)-1] + datetime.timedelta(days=3))
legend()
show()
| mit |
jakevdp/mpld3 | mpld3/tests/test_html.py | 2 | 1145 | """
Test html output
"""
import numpy as np
import matplotlib.pyplot as plt
from .. import fig_to_html, urls
from numpy.testing import assert_equal
def test_html():
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.random.random(10),
'--ok', alpha=0.3, zorder=10, lw=2)
d3_url = "http://this.is.a.test/d3.js"
mpld3_url = "http://this.is.a.test/mpld3.js"
for template_type in ["simple", "notebook", "general"]:
html1 = fig_to_html(fig, template_type=template_type)
html2 = fig_to_html(fig, d3_url, mpld3_url,
template_type=template_type)
# use [:-3] to strip .js from the end (it's not used in require)
assert urls.D3_URL[:-3] in html1
assert urls.MPLD3_URL[:-3] in html1
assert d3_url[:-3] in html2
assert mpld3_url[:-3] in html2
def test_no_scripts_added():
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.random.random(10),
'--ok', alpha=0.3, zorder=10, lw=2)
html = fig_to_html(fig, include_libraries=False)
assert urls.D3_URL[:-3] not in html
assert urls.MPLD3_URL[:-3] not in html
| bsd-3-clause |
russel1237/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
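# Editor's note: pdf() is an equal-weight mixture of two zero-mean normals with
# scales 0.25/e and 4/e; below it is only used (together with pdf(5 * z)) to give
# each scatter point an approximate density value for colouring, not to draw the
# samples themselves.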
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
FederatedAI/FATE | python/federatedml/transfer_learning/hetero_ftl/test/test_ftl_modules.py | 1 | 3945 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from federatedml.util import consts
from federatedml.nn.homo_nn.nn_model import get_nn_builder
import json
from federatedml.param.ftl_param import FTLParam
from numpy import array
from fate_arch.session import computing_session as session
import pandas as pd
from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.transfer_learning.hetero_ftl.ftl_guest import FTLGuest
from federatedml.transfer_learning.hetero_ftl.ftl_host import FTLHost
from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL
from federatedml.param.ftl_param import FTLParam
from federatedml.feature.instance import Instance
import json
class TestFTL(unittest.TestCase):
def setUp(self):
session.init('test', 0)
def test_guest_model_init(self):
model = FTLGuest()
param = FTLParam(
nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
)
param.check()
model._init_model(param)
model.initialize_nn(input_shape=100)
print(model.nn.get_trainable_weights())
def test_host_model_init(self):
model = FTLHost()
param = FTLParam(
nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
)
param.check()
model._init_model(param)
model.initialize_nn(input_shape=100)
print(model.nn.get_trainable_weights())
def test_label_reset(self):
l = []
for i in range(100):
inst = Instance()
inst.features = np.random.random(20)
l.append(inst)
inst.label = -1
for i in range(100):
inst = Instance()
inst.features = np.random.random(20)
l.append(inst)
inst.label = 1
table = session.parallelize(l, partition=4, include_key=False)
rs = FTL().check_label(table)
new_label = [i[1].label for i in list(rs.collect())]
print(new_label)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
moutai/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
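# Editor's note (illustrative sketch, not one of the original tests): the helper
# above is the brute-force oracle the tree results are checked against, e.g.
#
#     X, Y = np.random.random((40, 3)), np.random.random((10, 3))
#     dist_kd, ind_kd = KDTree(X, leaf_size=2).query(Y, k=3)
#     dist_bf, ind_bf = brute_force_neighbors(X, Y, k=3, metric='euclidean')
#     assert_array_almost_equal(dist_kd, dist_bf)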
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing to np.average() None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
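# Illustrative sketch: explained_variance_score differs from r2_score only in
# that the mean residual is removed before squaring, so a predictor with a
# constant offset can still reach 1.0 explained variance while being penalized
# by R^2. The helper name below is an example only and is not used elsewhere.
def _example_explained_variance_vs_r2():
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = y_true + 1.0  # systematically biased predictions
    ev = explained_variance_score(y_true, y_pred)  # 1.0: residuals have zero variance
    r2 = r2_score(y_true, y_pred)  # < 1.0: the constant offset is penalized
    return ev, r2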
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in next versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/tsa/stattools.py | 26 | 37127 | """
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types, lzip,
zip, map)
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from .tsatools import lagmat, lagmat2ds, add_trend
from .adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa.arima_model import ARMA
from statsmodels.compat.scipy import _next_regular
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller']
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length that minimizes the information criterion.
Parameters
----------
mod : Model class
Model estimator class.
modargs : tuple
args to pass to model. See notes.
fitargs : tuple
args to pass to fit. See notes.
    startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : str {"aic","bic","t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
Returns
-------
icbest : float
Best information criteria.
bestlag : int
        The lag length that minimizes the information criterion.
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
        raise ValueError("Information Criterion %s not understood." % method)
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
'''
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : str {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterium
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant at
the 95 % level.
store : bool
If True, then a result instance is returned additionally to
the adf statistic (default is False)
regresults : bool
If True, the full regression results are returned (default is False)
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994)
usedlag : int
Number of lags used.
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values.
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
        The minimized information criterion if autolag is not None.
    regresults : RegressionResults instance
        The full regression results, returned only if regresults is True.
    resstore : (optional) instance of ResultsStore
an instance of a dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables.
If the p-value is close to significant, then the critical values should be
used to judge whether to accept or reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
see example script
References
----------
Greene
Hamilton
P-Values (regression surface approximation)
MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
Critical values
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
'''
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, int):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
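# Example usage sketch for adfuller (illustrative only; the Examples section
# above refers to an external example script). ``y`` is assumed to be a 1d
# array-like time series; the helper name is an example, not part of the API.
def _example_adfuller_usage(y):
    # automatic lag selection by AIC; with autolag set, six values are returned
    adfstat, pvalue, usedlag, nobs, critvalues, icbest = adfuller(y, autolag='AIC')
    return {'adf': adfstat, 'pvalue': pvalue, 'usedlag': usedlag,
            'critvalues': critvalues}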
def acovf(x, unbiased=False, demean=True, fft=False):
'''
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
Returns
-------
acovf : array
autocovariance function
'''
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
n = len(x)
if demean:
xo = x - x.mean()
else:
xo = x
if unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
else:
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
Frf = np.fft.fft(xo, n=nobs * 2)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[n - 1:]
return acov.real
else:
return (np.correlate(xo, xo, 'full') / d)[n - 1:]
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns Ljung-Box Q statistic
    Parameters
    ----------
    x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None):
'''
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
qstat : array, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : array, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
nobs = len(x)
d = nobs # changes if unbiased
if not fft:
avf = acovf(x, unbiased=unbiased, demean=True)
#acf = np.take(avf/avf[0], range(1,nlags+1))
acf = avf[:nlags + 1] / avf[0]
else:
x = np.squeeze(np.asarray(x))
#JP: move to acovf
x0 = x - x.mean()
# ensure that we always use a power of 2 or 3 for zero-padding,
# this way we'll ensure O(n log n) runtime of the fft.
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(x0, n=n) # zero-pad for separability
if unbiased:
d = nobs - np.arange(nobs)
acf = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d
acf /= acf[0]
#acf = np.take(np.real(acf), range(1,nlags+1))
acf = np.real(acf[:nlags + 1]) # keep lag 0
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
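# Example usage sketch for acf (illustrative only). With ``alpha`` and
# ``qstat`` both set, four arrays come back: autocorrelations, Bartlett
# confidence intervals, Ljung-Box Q statistics and their p-values.
def _example_acf_usage(y):
    acf_vals, confint, qstats, pvalues = acf(y, nlags=20, alpha=.05,
                                             qstat=True, fft=True)
    return acf_vals, confint, qstats, pvalues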
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
'''Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'ywunbiased' (default) or 'ywmle' or 'ols'
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
partial autocorrelations, nlags elements, including lag zero
confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
'''
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
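# Example usage sketch for pacf (illustrative only): the available estimation
# methods should give broadly similar partial autocorrelations on a
# well-behaved stationary series.
def _example_pacf_methods(y):
    p_yw = pacf(y, nlags=10, method='ywunbiased')
    p_ols = pacf(y, nlags=10, method='ols')
    p_ld = pacf(y, nlags=10, method='ldbiased')
    return p_yw, p_ols, p_ld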
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators is n-k, otherwise n
Returns
-------
ccovf : array
autocovariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators for autocovariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
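# Illustrative check of the periodogram above (helper name is an example only):
# with the unnormalized FFT convention and the zeroed frequency-zero ordinate,
# the ordinates sum to len(X) times the biased sample variance (Parseval).
def _example_periodogram_parseval(X):
    X = np.asarray(X, dtype=float)
    return np.allclose(periodogram(X).sum(), len(X) * X.var())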
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If iasacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
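# Example usage sketch for levinson_durbin (illustrative only): feed it the
# biased sample autocovariances and read the partial autocorrelations from the
# third return value, mirroring what pacf(..., method='ldbiased') does.
def _example_levinson_durbin_pacf(y, nlags=10):
    acv = acovf(y, unbiased=False)
    sigma_v, arcoefs, pacf_ld, sig, phi = levinson_durbin(acv, nlags=nlags,
                                                          isacov=True)
    return pacf_ld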
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d, (nobs,2)
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
        lag the values are a tuple, with the first element a dictionary with
        test statistic, p-values and degrees of freedom, and the second element
        a list of the OLS estimation results for the restricted model, the
        unrestricted model and the restriction (contrast) matrix for the
        parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four test is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
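# Example usage sketch for grangercausalitytests (illustrative only). ``x`` is
# a (nobs, 2) array and the test asks whether the second column Granger-causes
# the first; each lag maps to a dict of test results plus the fitted models.
def _example_granger_usage(x, maxlag=4):
    res = grangercausalitytests(x, maxlag, verbose=False)
    # p-value of the ssr-based F test at lag 1
    return res[1][0]['ssr_ftest'][1]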
def coint(y1, y2, regression="c"):
"""
This is a simple cointegration test. Uses unit-root test on residuals to
test for cointegrated relationship
See Hamilton (1994) 19.2
Parameters
----------
y1 : array_like, 1d
first element in cointegrating vector
y2 : array_like
remaining elements in cointegrating vector
c : str {'c'}
Included in regression
* 'c' : Constant
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values are obtained through regression surface approximation from
MacKinnon 1994.
References
----------
MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
"""
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
y1 = np.asarray(y1)
y2 = np.asarray(y2)
if regression == 'c':
y2 = add_constant(y2, prepend=False)
st1_resid = OLS(y1, y2).fit().resid # stage one residuals
lgresid_cons = add_constant(st1_resid[0:-1], prepend=False)
uroot_reg = OLS(st1_resid[1:], lgresid_cons).fit()
coint_t = (uroot_reg.params[0] - 1) / uroot_reg.bse[0]
pvalue = mackinnonp(coint_t, regression="c", N=2, lags=None)
crit_value = mackinnoncrit(N=1, regression="c", nobs=len(y1))
return coint_t, pvalue, crit_value
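# Example usage sketch for coint (illustrative only): a small p-value is
# evidence against the null hypothesis of no cointegration between y1 and y2.
def _example_coint_usage(y1, y2):
    coint_t, pvalue, crit_value = coint(y1, y2)
    return pvalue < 0.05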
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ((hasattr(error, 'message') and 'initial' not in error.message)
or 'initial' in str(error)): # py2 and py3
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
>>> maparam = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.macrodata.load().data
x = data['realgdp']
# adf is tested now.
adf = adfuller(x, 4, autolag=None)
adfbic = adfuller(x, autolag="bic")
adfaic = adfuller(x, autolag="aic")
adftstat = adfuller(x, autolag="t-stat")
# acf is tested now
    acf1, ci1, Q, pvalue = acf(x, nlags=40, alpha=.05, qstat=True)
    acf2, ci2, Q2, pvalue2 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True)
    acf3, ci3, Q3, pvalue3 = acf(x, nlags=40, alpha=.05, qstat=True,
                                 unbiased=True)
    acf4, ci4, Q4, pvalue4 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True,
                                 unbiased=True)
# pacf is tested now
# pacf1 = pacorr(x)
# pacfols = pacf_ols(x, nlags=40)
# pacfyw = pacf_yw(x, nlags=40, method="mle")
y = np.random.normal(size=(100, 2))
grangercausalitytests(y, 2)
| bsd-3-clause |
ifcharming/voltdb2.1 | tools/vis-micro.py | 4 | 8084 | #!/usr/bin/env python
# This is a visualizer which pulls microbenchmark results from the MySQL
# databases and visualizes them. Four graphs will be generated: latency graphs
# on single node and multiple nodes, and throughput graphs on single node and
# multiple nodes.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
import time
import datetime
import MySQLdb
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
class Stat:
def __init__(self, hostname, username, password, database):
self.conn = MySQLdb.connect(host = hostname,
user = username,
passwd = password,
db = database)
self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
def close(self):
self.cursor.close()
self.conn.close()
class LatencyStat(Stat):
LATENCIES = """
SELECT startTime AS time, numHosts AS hosts, AVG(latencies) AS latency
FROM ma_instances AS runs
JOIN ma_clientInstances AS clients ON clusterStartTime = startTime
JOIN (SELECT instanceId, AVG(clusterRoundtripAvg) AS latencies
FROM ma_clientProcedureStats
GROUP BY instanceId) AS stats ON stats.instanceId = clientInstanceId
WHERE runs.startTime >= '%s'
AND clients.applicationName = "Microbenchmark."
AND clients.subApplicationName = "Workload."
GROUP BY startTime
LIMIT %u
"""
def get_latencies(self, start_time, count):
res = []
latencies = {}
self.cursor.execute(self.LATENCIES % (start_time, count))
res = list(self.cursor.fetchall())
for i in res:
i["time"] = datetime.date.fromtimestamp(i["time"] / 1000.0)
key = (i["time"], i["hosts"])
if i["latency"] == None:
continue
if key not in latencies \
or i["latency"] < latencies[key]["latency"]:
latencies[key] = i
return latencies.values()
class ThroughputStat(Stat):
THROUGHPUT = """
SELECT resultid as id,
hostcount as hosts,
date(time) as time,
avg(txnpersecond) as tps
FROM results
WHERE time >= '%s'
AND benchmarkname = 'org.voltdb.benchmark.workloads.Generator'
GROUP BY hostcount, date(time)
ORDER BY time DESC
LIMIT %u
"""
def get_throughputs(self, time, count):
throughput_map = {}
self.cursor.execute(self.THROUGHPUT % (time, count))
return list(self.cursor.fetchall())
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
fig.autofmt_xdate()
def plot(self, x, y, color, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend), marker="^",
markerfacecolor=color, markersize=10)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
plt.legend(loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
def parse_credentials(filename):
credentials = {}
fd = open(filename, "r")
for i in fd:
line = i.strip().split("?")
credentials["hostname"] = line[0].split("/")[-2]
db = line[0].split("/")[-1]
pair = line[1].split("&")
user = pair[0].strip("\\").split("=")
password = pair[1].strip("\\").split("=")
if user[1].startswith("monitor"):
credentials["latency"] = {user[0]: user[1],
password[0]: password[1],
"database": db}
else:
credentials["throughput"] = {user[0]: user[1],
password[0]: password[1],
"database": db}
fd.close()
return credentials
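# Example of the credential file format assumed by parse_credentials (one line
# per database; the host, database and account names below are made up for
# illustration). A user name starting with "monitor" selects the latency
# database, any other user selects the throughput database:
#
#   jdbc:mysql://stats-host/monitoring?user=monitorUser&password=secret1
#   jdbc:mysql://stats-host/results?user=resultsUser&password=secret2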
def usage():
print "Usage:"
print "\t", sys.argv[0], "credential_file output_dir filename_base" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 4:
usage()
exit(-1)
if not os.path.exists(sys.argv[2]):
print sys.argv[2], "does not exist"
exit(-1)
credentials = parse_credentials(sys.argv[1])
path = os.path.join(sys.argv[2], sys.argv[3])
width = None
height = None
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
latency_stat = LatencyStat(credentials["hostname"],
credentials["latency"]["user"],
credentials["latency"]["password"],
credentials["latency"]["database"])
volt_stat = ThroughputStat(credentials["hostname"],
credentials["throughput"]["user"],
credentials["throughput"]["password"],
credentials["throughput"]["database"])
timedelta = datetime.timedelta(days=30)
starttime = datetime.datetime.now() - timedelta
timestamp = time.mktime(starttime.timetuple()) * 1000.0
latencies = latency_stat.get_latencies(timestamp, 900)
throughput = volt_stat.get_throughputs(starttime, 900)
latency_stat.close()
volt_stat.close()
latency_map = {}
latencies.sort(key=lambda x: x["time"])
for v in latencies:
if v["time"] == None or v["latency"] == None:
continue
if v["hosts"] not in latency_map:
latency_map[v["hosts"]] = {"time": [], "latency": []}
datenum = matplotlib.dates.date2num(v["time"])
latency_map[v["hosts"]]["time"].append(datenum)
latency_map[v["hosts"]]["latency"].append(v["latency"])
if 1 in latency_map:
pl = Plot("Average Latency on Single Node", "Time", "Latency (ms)",
path + "-latency-single.png",
width, height)
v = latency_map.pop(1)
pl.plot(v["time"], v["latency"], COLORS(1), 1)
pl.close()
if len(latency_map) > 0:
pl = Plot("Average Latency", "Time", "Latency (ms)",
path + "-latency.png", width, height)
for k in latency_map.iterkeys():
v = latency_map[k]
pl.plot(v["time"], v["latency"], COLORS(k), k)
pl.close()
throughput_map = {}
throughput.sort(key=lambda x: x["id"])
for v in throughput:
if v["hosts"] not in throughput_map:
throughput_map[v["hosts"]] = {"time": [], "tps": []}
datenum = matplotlib.dates.date2num(v["time"])
# print "hosts", v["hosts"], "time", v["time"], "tps", v["tps"]
throughput_map[v["hosts"]]["time"].append(datenum)
throughput_map[v["hosts"]]["tps"].append(v["tps"])
if 1 in throughput_map:
pl = Plot("Single Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-single.png",
width, height)
v = throughput_map.pop(1)
pl.plot(v["time"], v["tps"], COLORS(1), 1)
pl.close()
if len(throughput_map) > 0:
pl = Plot("Performance", "Time", "Throughput (txns/sec)",
path + "-throughput.png", width, height)
for k in throughput_map.iterkeys():
v = throughput_map[k]
pl.plot(v["time"], v["tps"], COLORS(k), k)
pl.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
aquemy/HCBR | script/biais.py | 1 | 3235 | import argparse
import pandas
import os
import numpy as np
import matplotlib.pyplot as plt
import functools
from collections import Counter
import math
import random
def accuracy(decision_vector):
ok = len([s[2] for s in decision_vector if confusion_matrix_label(s) in ['TP', 'TN']])
return float(ok) / len(decision_vector)
def determine_bias(mu0, mu1, weights, J, f=accuracy):
bias = 0
dr = functools.partial(decision_rule, bias=bias)
predictor = np.vectorize(dr)
D, S, S0, S1 = calculate_decision_vector(predictor, mu1, mu0, weights, J)
decision_vector = np.column_stack((D,J,S,S,D))
confusion_matrix, labels = calculate_confusion_matrix(decision_vector)
max_v = 0
max_i = 0
for i, e in enumerate(decision_vector):
if labels[i] in ['FP', 'FN']:
dr = functools.partial(decision_rule, bias=e[3])
predictor = np.vectorize(dr)
D, S, S0, S1 = calculate_decision_vector(predictor, mu1, mu0, weights, J)
dv = np.column_stack((D,J,S,S,D))
confusion_matrix, labels = calculate_confusion_matrix(dv)
v = f(dv)
max_v = max_v if max_v > v else v
max_i = max_i if max_v > v else i
#print('{}/{} - {} | {}'.format(i, len(decision_vector), max_v, decision_vector[max_i][3]))
return decision_vector[max_i][3]
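# Illustrative note (added, not part of the original script): determine_bias()
# is a simple line search. Every score attached to a currently misclassified
# case (FP or FN) is tried as a candidate bias, and the candidate maximizing
# the metric f (accuracy by default) is returned. A hypothetical call,
# assuming weights of shape (n_cases, n_features) and mu vectors of shape
# (n_features, 1):
#   bias = determine_bias(mu0, mu1, weights, J)
#   predict = np.vectorize(functools.partial(decision_rule, bias=bias))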
def decision_rule(s, eta1=0, eta0=0, l1=1, l0=0, bias=0):
if s > bias:
if s > eta1:
return 1
else:
return l1
else:
if s < eta0:
return 0
else:
return l0
def confusion_matrix_label(o,i=0):
if o[1] == 1:
return 'TP' if o[i] == 1 else 'FN'
else:
return 'TN' if o[i] == 0 else 'FP'
def calculate_confusion_matrix(decision_vector):
cf_label = np.array(map(confusion_matrix_label, decision_vector))
return Counter(cf_label), cf_label
def calculate_decision_vector(predictor, mu1, mu0, weights, J):
S1 = np.matmul(weights, mu1)
S0 = np.matmul(weights, mu0)
S = S1 - S0
D = predictor(S)
return D, S, S0, S1
def main(args):
weights = pandas.read_table(args.weights, delim_whitespace=True, header=None)
mu0 = pandas.read_table(args.mu0, delim_whitespace=True, header=None)
mu1 = pandas.read_table(args.mu1, delim_whitespace=True, header=None)
J = pandas.read_table(args.outcomes, delim_whitespace=True, header=None)[:len(weights)]
weights = weights.values
mu0 = mu0.values
mu1 = mu1.values
J = J.values
bias = determine_bias(mu0, mu1, weights, J)
print(bias)
def parse_args(parser):
args = parser.parse_args()
# Check path
return args
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adjust model')
parser.add_argument('--weights', type=str)
parser.add_argument('--mu0', type=str)
parser.add_argument('--mu1', type=str)
parser.add_argument('--outcomes', type=str)
parser.add_argument('--l1', default=1, type=int)
parser.add_argument('--l0', default=0, type=int)
parser.add_argument('--eta1', default=0., type=float)
parser.add_argument('--eta0', default=0., type=float)
args = parse_args(parser)
main(args)
| mit |
ilo10/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
nixingyang/Kaggle-Face-Verification | Titanic/ensemble.py | 1 | 1573 | import file_operations
import glob
import numpy as np
import os
import pandas as pd
import solution
import time
OLD_SUBMISSION_FOLDER_PATH = solution.SUBMISSION_FOLDER_PATH
NEW_SUBMISSION_FOLDER_PATH = "./"
def perform_ensembling(low_threshold, high_threshold):
print("Reading the submission files from disk ...")
prediction_list = []
for submission_file_path in glob.glob(os.path.join(OLD_SUBMISSION_FOLDER_PATH, "*.csv")):
if os.path.basename(submission_file_path) < "Aurora_{:.4f}".format(low_threshold) or \
os.path.basename(submission_file_path) > "Aurora_{:.4f}".format(high_threshold):
continue
submission_file_content = pd.read_csv(submission_file_path)
prediction = submission_file_content[file_operations.LABEL_COLUMN_NAME_IN_SUBMISSION].as_matrix()
prediction_list.append(prediction)
print("Writing the submission files to disk ...")
mean_prediction = np.mean(prediction_list, axis=0)
median_prediction = np.median(prediction_list, axis=0)
for bias, prediction in enumerate([mean_prediction, median_prediction]):
submission_file_name = "Ensemble_{:.4f}_to_{:.4f}_{:d}.csv".format(low_threshold, high_threshold, int(time.time()) + bias)
submission_file_path = os.path.join(NEW_SUBMISSION_FOLDER_PATH, submission_file_name)
submission_file_content[file_operations.LABEL_COLUMN_NAME_IN_SUBMISSION] = (prediction > 0.5).astype(np.int)
submission_file_content.to_csv(submission_file_path, index=False)
perform_ensembling(0, 1)
print("All done!")
| mit |
evanbiederstedt/RRBSfun | epiphen/normalCll2.py | 1 | 10302 | import glob
import pandas as pd
import numpy as np
import os
os.chdir("/gpfs/commons/home/biederstedte-934/projects_evan/correct_phylo_files")
cw154cell = glob.glob("binary_position_RRBS_cw154_Tris_protease*")
print(len(cw154cell))
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(cd19cell))
totalfiles = cw154cell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TGCTGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TGCTGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GACACG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TGCTGC"]
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_cll_2.phy", header=None, index=None)
print(tott.shape)
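# Added note (not part of the original script): after the column-wise join
# above, each entry of tott appears to be one PHYLIP-style line of the form
# "<sample_name> <string of 0/1/? per CpG position>", which matches the .phy
# extension used for the output file.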
| mit |
AlexanderFabisch/scikit-learn | sklearn/cluster/bicluster.py | 66 | 19850 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
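# Hedged usage sketch (added; not part of the scikit-learn source). A typical
# workflow, with every name below taken from the public sklearn API:
#
#   from sklearn.datasets import make_biclusters
#   from sklearn.cluster import SpectralCoclustering
#   data, rows, cols = make_biclusters((300, 300), n_clusters=5,
#                                      random_state=0)
#   model = SpectralCoclustering(n_clusters=5, random_state=0).fit(data)
#   model.row_labels_     # cluster index of each row
#   model.column_labels_  # cluster index of each column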
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
bzero/statsmodels | statsmodels/tsa/descriptivestats.py | 33 | 2304 | # -*- coding: utf-8 -*-
"""Descriptive Statistics for Time Series
Created on Sat Oct 30 14:24:08 2010
Author: josef-pktd
License: BSD(3clause)
"""
import numpy as np
from . import stattools as stt
#todo: check subclassing for descriptive stats classes
class TsaDescriptive(object):
'''collection of descriptive statistical methods for time series
'''
def __init__(self, data, label=None, name=''):
self.data = data
self.label = label
self.name = name
def filter(self, num, den):
from scipy.signal import lfilter
xfiltered = lfilter(num, den, self.data)
return self.__class__(xfiltered, self.label, self.name + '_filtered')
def detrend(self, order=1):
from . import tsatools
xdetrended = tsatools.detrend(self.data, order=order)
return self.__class__(xdetrended, self.label, self.name + '_detrended')
def fit(self, order=(1,0,1), **kwds):
from .arima_model import ARMA
self.mod = ARMA(self.data)
self.res = self.mod.fit(order=order, **kwds)
#self.estimated_process =
return self.res
def acf(self, nlags=40):
return stt.acf(self.data, nlags=nlags)
def pacf(self, nlags=40):
return stt.pacf(self.data, nlags=nlags)
def periodogram(self):
        #doesn't return frequencies
return stt.periodogram(self.data)
# copied from fftarma.py
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
data = self.data
acf = self.acf(nacf)
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq, endpoint=False)
spdr = self.periodogram()[:nfreq] #(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
namestr = ' for %s' % self.name if self.name else ''
ax.plot(data)
ax.set_title('Time series' + namestr)
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation' + namestr)
ax = fig.add_subplot(2,2,3)
ax.plot(spdr) # (wr, spdr)
ax.set_title('Power Spectrum' + namestr)
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation' + namestr)
return fig
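# Added usage sketch (hypothetical data, not from statsmodels itself):
#   import numpy as np
#   x = np.cumsum(np.random.randn(200))   # made-up series
#   d = TsaDescriptive(x, name='random walk')
#   fig = d.plot4(nobs=200)   # time series, ACF, periodogram, PACF panels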
| bsd-3-clause |
lkishline/expyfun | expyfun/analyze/_analyze.py | 1 | 11928 | """Analysis functions (mostly for psychophysics data).
"""
import warnings
import numpy as np
import scipy.stats as ss
from scipy.optimize import curve_fit
from functools import partial
from collections import namedtuple
def press_times_to_hmfc(presses, targets, foils, tmin, tmax,
return_type='counts'):
"""Convert press times to hits/misses/FA/CR
Parameters
----------
presses : list
List of press times (in seconds).
targets : list
List of target times.
foils : list | None
List of foil (distractor) times.
tmin : float
Minimum time after a target/foil to consider a press.
tmax : float
Maximum time after a target/foil to consider a press.
return_type : str
Currently only ``'counts'`` is supported. Eventually we will
        add reaction-time support as well.
Returns
-------
hmfco : list
Hits, misses, false alarms, correct rejections, and other presses
(not within the window for a target or a masker).
Notes
-----
Multiple presses within a single "target window" (i.e., between ``tmin``
and ``tmax`` of a target) or "masker window" get treated as a single
press by this function. However, there is no such de-bouncing of responses
to "other" times.
"""
# Sanity check that targets and foils don't overlap (due to tmin/tmax)
targets = np.atleast_1d(targets) + tmin
foils = np.atleast_1d(foils) + tmin
dur = float(tmax - tmin)
assert dur > 0
presses = np.sort(np.atleast_1d(presses))
assert targets.ndim == foils.ndim == presses.ndim == 1
all_times = np.concatenate(([-np.inf], targets, foils, [np.inf]))
order = np.argsort(all_times)
inv_order = np.argsort(order)
all_times = all_times[order]
if not np.all(all_times[:-1] + dur <= all_times[1:]):
raise ValueError('Analysis windows for targets and foils overlap')
# Let's just loop (could probably be done with vector math, but it's
# too hard and unlikely to be correct)
locs = np.searchsorted(all_times, presses, 'right')
if len(locs) > 0:
assert locs.max() < len(all_times) # should be True b/c of np.inf
assert locs.min() >= 1
# figure out which presses were to target or masker (valid_idx)
in_window = (presses <= all_times[locs - 1] + dur)
valid_idx = np.where(in_window)[0]
n_other = np.sum(~in_window)
# figure out which of valid presses were to target or masker
used = np.unique(locs[valid_idx]) # unique to remove double-presses
orig_places = (inv_order[used - 1] - 1)
n_hit = sum(orig_places < len(targets))
n_fa = len(used) - n_hit
n_miss = len(targets) - n_hit
n_cr = len(foils) - n_fa
return n_hit, n_miss, n_fa, n_cr, n_other
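# Added example (hand-checked against the implementation above, values made
# up): a press at 0.6 s falls in the window of the target at 0.5 s (hit),
# the press at 2.1 s falls in the window of the foil at 2.0 s (false alarm),
# and the press at 6.0 s matches nothing (other), so
#   press_times_to_hmfc([0.6, 2.1, 6.0], [0.5, 3.0], [2.0],
#                       tmin=0.1, tmax=1.0)
# is expected to return (1, 1, 1, 0, 1).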
def logit(prop, max_events=None):
"""Convert proportion (expressed in the range [0, 1]) to logit.
Parameters
----------
prop : float | array-like
the occurrence proportion.
max_events : int | array-like | None
the number of events used to calculate ``prop``. Used in a correction
factor for cases when ``prop`` is 0 or 1, to prevent returning ``inf``.
If ``None``, no correction is done, and ``inf`` or ``-inf`` may result.
Returns
-------
lgt : ``numpy.ndarray``, with shape matching ``numpy.array(prop).shape``.
"""
prop = np.atleast_1d(prop).astype(float)
if np.any([prop > 1, prop < 0]):
raise ValueError('Proportions must be in the range [0, 1].')
if max_events is not None:
# add equivalent of half an event to 0s, and subtract same from 1s
max_events = np.atleast_1d(max_events) * np.ones_like(prop)
corr_factor = 0.5 / max_events
for loc in zip(*np.where(prop == 0)):
prop[loc] = corr_factor[loc]
for loc in zip(*np.where(prop == 1)):
prop[loc] = 1 - corr_factor[loc]
return np.log(prop / (np.ones_like(prop) - prop))
def sigmoid(x, lower=0., upper=1., midpt=0., slope=1.):
"""Calculate sigmoidal values along the x-axis
Parameters
----------
x : array-like
x-values to calculate the sigmoidal values from.
lower : float
The lower y-asymptote.
upper : float
The upper y-asymptote.
midpt : float
The x-value that obtains 50% between the lower and upper asymptote.
slope : float
The slope of the sigmoid.
Returns
-------
y : array
The y-values of the sigmoid evaluated at x.
"""
x = np.asarray(x)
lower = float(lower)
upper = float(upper)
midpt = float(midpt)
slope = float(slope)
y = (upper - lower) / (1 + np.exp(-slope * (x - midpt))) + lower
return y
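# Added illustration (not in the original module): with the default
# asymptotes the curve passes through 0.5 at its midpoint, e.g.
#   sigmoid(0.)                          # -> 0.5
#   sigmoid([-10., 0., 10.], upper=2.)   # -> approximately [0., 1., 2.]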
def fit_sigmoid(x, y, p0=None, fixed=()):
"""Fit a sigmoid to summary data
Given a set of average values ``y`` (e.g., response probabilities) as a
function of a variable ``x`` (e.g., presented target level), this
will estimate the underlying sigmoidal response. Note that the fitting
function can be sensitive to the shape of the data, so always inspect
your results.
Parameters
----------
x : array-like
x-values along the sigmoid.
y : array-like
y-values at each location in the sigmoid.
p0 : array-like | None
Initial guesses for the fit. Can be None to estimate all parameters,
or members of the array can be None to have these automatically
estimated.
fixed : list of str
Which parameters should be fixed.
Returns
-------
lower, upper, midpt, slope : floats
See expyfun.analyze.sigmoid for descriptions.
"""
# Initial estimates
x = np.asarray(x)
y = np.asarray(y)
k = 2 * 4. / (np.max(x) - np.min(x))
if p0 is None:
p0 = [None] * 4
p0 = list(p0)
for ii, p in enumerate([np.min(y), np.max(y),
np.mean([np.max(x), np.min(x)]), k]):
p0[ii] = p if p0[ii] is None else p0[ii]
p0 = np.array(p0, dtype=np.float64)
if p0.size != 4 or p0.ndim != 1:
raise ValueError('p0 must have 4 elements, or be None')
# Fixing values
p_types = ('lower', 'upper', 'midpt', 'slope')
for f in fixed:
if f not in p_types:
raise ValueError('fixed {0} not in parameter list {1}'
''.format(f, p_types))
fixed = np.array([(True if f in fixed else False) for f in p_types], bool)
kwargs = dict()
idx = list()
keys = list()
for ii, key in enumerate(p_types):
if fixed[ii]:
kwargs[key] = p0[ii]
else:
keys.append(key)
idx.append(ii)
p0 = p0[idx]
if len(idx) == 0:
raise RuntimeError('cannot fit with all fixed values')
def wrapper(*args):
assert len(args) == len(keys) + 1
for key, arg in zip(keys, args[1:]):
kwargs[key] = arg
return sigmoid(args[0], **kwargs)
out = curve_fit(wrapper, x, y, p0=p0)[0]
assert len(idx) == len(out)
for ii, o in zip(idx, out):
kwargs[p_types[ii]] = o
return namedtuple('params', p_types)(**kwargs)
def rt_chisq(x, axis=None):
"""Chi square fit for reaction times (a better summary statistic than mean)
Parameters
----------
x : array-like
Reaction time data to fit.
axis : int | None
The axis along which to calculate the chi-square fit. If none, ``x``
will be flattened before fitting.
Returns
-------
peak : float | array-like
The peak(s) of the fitted chi-square probability density function(s).
Notes
-----
Verify that it worked by plotting pdf vs hist (for 1-dimensional x)::
>>> import numpy as np
>>> from scipy import stats as ss
>>> import matplotlib.pyplot as plt
>>> plt.ion()
>>> x = np.abs(np.random.randn(10000) + 1)
>>> lsp = np.linspace(np.floor(np.amin(x)), np.ceil(np.amax(x)), 100)
>>> df, loc, scale = ss.chi2.fit(x, floc=0)
>>> pdf = ss.chi2.pdf(lsp, df, scale=scale)
>>> plt.plot(lsp, pdf)
>>> plt.hist(x, normed=True)
"""
x = np.asarray(x)
if np.any(np.less(x, 0)): # save the user some pain
raise ValueError('x cannot have negative values')
if axis is None:
df, _, scale = ss.chi2.fit(x, floc=0)
else:
fit = partial(ss.chi2.fit, floc=0)
params = np.apply_along_axis(fit, axis=axis, arr=x) # df, loc, scale
pmut = np.concatenate((np.atleast_1d(axis),
np.delete(np.arange(x.ndim), axis)))
df = np.transpose(params, pmut)[0]
scale = np.transpose(params, pmut)[2]
quartiles = np.percentile(x, (25, 75))
whiskers = quartiles + np.array((-1.5, 1.5)) * np.diff(quartiles)
n_bad = np.sum(np.logical_or(np.less(x, whiskers[0]),
np.greater(x, whiskers[1])))
if n_bad > 0:
warnings.warn('{0} likely bad values in x (of {1})'
''.format(n_bad, x.size))
peak = np.maximum(0, (df - 2)) * scale
return peak
def dprime(hmfc, zero_correction=True):
"""Estimates d-prime, with optional correction factor to avoid infinites.
Parameters
----------
hmfc : array-like
Hits, misses, false-alarms, and correct-rejections, in that order, as
array-like data with last dimension having size 4.
zero_correction : bool
Whether to add a correction factor of 0.5 to each category to prevent
division-by-zero leading to infinite d-prime values.
Returns
-------
dp : array-like
Array of dprimes with shape ``hmfc.shape[:-1]``.
Notes
-----
For two-alternative forced-choice tasks, it is recommended to enter correct
trials as hits and incorrect trials as false alarms, and enter misses and
correct rejections as 0. An alternative is to use ``dprime_2afc()``, which
wraps to ``dprime()`` and does this assignment for you.
"""
hmfc = _check_dprime_inputs(hmfc)
a = 0.5 if zero_correction else 0.0
dp = ss.norm.ppf((hmfc[..., 0] + a) /
(hmfc[..., 0] + hmfc[..., 1] + 2 * a)) - \
ss.norm.ppf((hmfc[..., 2] + a) /
(hmfc[..., 2] + hmfc[..., 3] + 2 * a))
return dp
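# Added note (illustrative only): hmfc may be a flat length-4 sequence or an
# array whose last axis holds (hits, misses, false alarms, correct
# rejections); e.g. dprime([20, 5, 3, 22]) returns a single d' value, and
# with zero_correction=True the estimate stays finite even when a cell is 0.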
def dprime_2afc(hm, zero_correction=True):
"""Estimates d-prime for two-alternative forced-choice paradigms.
Parameters
----------
hm : array-like
Correct trials (hits) and incorrect trials (misses), in that order, as
array-like data with last dimension having size 4.
zero_correction : bool
Whether to add a correction factor of 0.5 to each category to prevent
division-by-zero leading to infinite d-prime values.
Returns
-------
dp : array-like
Array of dprimes with shape ``hmfc.shape[:-1]``.
"""
hmfc = _check_dprime_inputs(hm, True)
return dprime(hmfc, zero_correction)
def _check_dprime_inputs(hmfc, tafc=False):
"""Formats input to dprime() and dprime_2afc().
Parameters
----------
hmfc : array-like
Hit, miss, false-alarm, correct-rejection; or hit, miss for 2AFC.
tafc : bool
Is this a 2AFC design?
"""
hmfc = np.asarray(hmfc)
if tafc:
if hmfc.shape[-1] != 2:
raise ValueError('Array must have last dimension 2.')
else:
if hmfc.shape[-1] != 4:
raise ValueError('Array must have last dimension 4')
if tafc:
z = np.zeros(hmfc.shape[:-1] + (4,), hmfc.dtype)
z[..., [0, 2]] = hmfc
hmfc = z
if hmfc.dtype not in (np.int64, np.int32):
warnings.warn('Argument (%s) to dprime() cast to np.int64; floating '
'point values will have been truncated.' % hmfc.dtype)
hmfc = hmfc.astype(np.int64)
return hmfc
| bsd-3-clause |
poryfly/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
dmarx/Topological-Anomaly-Detection | src/tad/demo.py | 1 | 1051 | import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from pandas.tools.plotting import scatter_matrix
from sklearn import datasets
from sklearn.decomposition import PCA
from TADClassifier import tad_classify
iris = datasets.load_iris()
df = pd.DataFrame(iris.data)
res = tad_classify(df.values)
plot = True
if plot:
df['anomaly']=0
outliers_flat = res['scores'].keys()
df.anomaly.ix[outliers_flat] = 1
scatter_matrix(df.ix[:,:4], c=df.anomaly, s=(25 + 50*df.anomaly), alpha=.8)
plt.show()
print 'Anomalies:', res['outliers']
g = res['g']
X_pca = PCA().fit_transform(df)
pos = dict((i,(X_pca[i,0], X_pca[i,1])) for i in range(X_pca.shape[0]))
colors = []
labels = {}
for node in g.nodes():
if node in outliers_flat:
labels[node] = node
colors.append('r')
else:
labels[node] = ''
colors.append('b')
nx.draw(g, pos=pos, node_color = colors)#, labels=labels)
nx.draw_networkx_labels(g,pos,labels)
plt.show() | bsd-3-clause |
lin-credible/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 88 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
rohanp/scikit-learn | examples/manifold/plot_compare_methods.py | 1 | 4032 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.LaplacianEigenmap(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("LaplacianEigenmap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Laplacian Eigenmap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
Karel-van-de-Plassche/QLKNN-develop | qlknn/plots/load_data.py | 1 | 3503 | import os
import sys
import numpy as np
import scipy.stats as stats
import pandas as pd
from IPython import embed
from qlknn.NNDB.model import Network, NetworkJSON
from qlknn.models.ffnn import QuaLiKizNDNN
def load_data(id):
store = pd.HDFStore('../7D_nions0_flat.h5')
input = store['megarun1/input']
data = store['megarun1/flattened']
root_name = '/megarun1/nndb_nn/'
query = (Network.select(Network.target_names).where(Network.id == id).tuples()).get()
target_names = query[0]
if len(target_names) == 1:
target_name = target_names[0]
else:
NotImplementedError('Multiple targets not implemented yet')
print(target_name)
parent_name = root_name + target_name + '/'
network_name = parent_name + str(id)
network_name += '_noclip'
nn = load_nn(id)
df = data[target_name].to_frame('target')
df['prediction'] = store[network_name].iloc[:, 0]
df = df.astype('float64')
df['residuals'] = df['target'] - df['prediction']
df['maxgam'] = pd.DataFrame({'leq': data['gam_leq_GB'],
'less': data['gam_less_GB']}).max(axis=1)
return input, df, nn
def load_nn(id):
subquery = (Network.select(NetworkJSON.network_json)
.where(Network.id == id)
.join(NetworkJSON)
.tuples()).get()
json_dict = subquery[0]
nn = QuaLiKizNDNN(json_dict)
return nn
shortname = {'Ate': '$R/L_{T_e}$',
'Ati': '$R/L_{T_i}$'}
longname ={
'Ate': 'Normalized electron temperature gradient $R/L_{T_e}$',
'Ati': 'Normalized ion temperature gradient $R/L_{T_i}$'}
nameconvert = {
'An': '$R/L_n$',
#'Nustar': '$\\nu^*$',
'Nustar': '$log_{10}(\\nu^*)$',
'logNustar': '$log_{10}(\\nu^*)$',
'Ti_Te': 'Relative temperature $T_i/T_e$',
'Zeff': '$Z_{eff}$',
'q': '$q$',
'smag': 'Magnetic shear $\hat{s}$',
'x': '$\\varepsilon\,(r/R)$',
'efe_GB': '$q_e\,[GB]$',
'efi_GB': '$q_i\,[GB]$',
'efiITG_GB': '$q_{ITG, i}\,[GB]$',
'efeITG_GB': '$q_{ITG, e}\,[GB]$',
'efiTEM_GB': '$q_{TEM, i}\,[GB]$',
'efeTEM_GB': '$q_{TEM, e}\,[GB]$',
'efeETG_GB': 'Normalized heat flux $q$',
'pfe_GB': '$\Gamma_e\,[GB]$',
'pfi_GB': '$\Gamma_i\,[GB]$',
'pfeITG_GB': '$\Gamma_{ITG, i}\,[GB]$',
'pfeTEM_GB': '$\Gamma_{TEM, i}\,[GB]$',
'gam_leq_GB': '$\gamma_{max, \leq 2}\,[GB]$'
}
comboname = {
'efiTEM_GB_div_efeTEM_GB': nameconvert['efiTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'pfeTEM_GB_div_efeTEM_GB': nameconvert['pfeTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'efeITG_GB_div_efiITG_GB': nameconvert['efeITG_GB'] + '/' + nameconvert['efiITG_GB'],
'pfeITG_GB_div_efiITG_GB': nameconvert['pfeITG_GB'] + '/' + nameconvert['efiITG_GB']
}
nameconvert.update(shortname)
nameconvert.update(comboname)
def prettify_df(input, data):
try:
del input['nions']
except KeyError:
pass
for ii, col in enumerate(input):
if col == u'Nustar':
input[col] = input[col].apply(np.log10)
#se = input[col]
#se.name = nameconvert[se.name]
input['x'] = (input['x'] / 3)
input.rename(columns=nameconvert, inplace=True)
data.rename(columns=nameconvert, inplace=True)
#for ii, col in enumerate(data):
# se = data[col]
# try:
# se.name = nameconvert[se.name]
# except KeyError:
# warn('Did not translate name for ' + se.name)
return input, data
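# Added usage sketch: both helpers assume a live NNDB connection and the
# 7D HDF5 store opened inside load_data(); the network id below is
# hypothetical.
#   input, df, nn = load_data(37)
#   input, df = prettify_df(input, df)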
| mit |
aitoralmeida/networkx | examples/graph/napoleon_russian_campaign.py | 44 | 3216 | #!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
| bsd-3-clause |
EnvGen/toolbox | scripts/rpkm_annotations_table.py | 1 | 1213 | #!/usr/bin/env python
"""A script to sum the values for all genes for each annotation."""
import pandas as pd
import argparse
import sys
def main(args):
rpkm_table =pd.read_table(args.rpkm_table, index_col=0)
annotations = pd.read_table(args.annotation_table, header=None, names=["gene_id", "annotation", "evalue", "score"])
annotation_rpkm = {}
for annotation, annotation_df in annotations.groupby('annotation'):
annotation_rpkm[annotation] = rpkm_table.ix[annotation_df.gene_id].sum()
annotation_rpkm_df = pd.DataFrame.from_dict(annotation_rpkm, orient='index')
# The output columns should be sorted but with gene_length first
columns = sorted(rpkm_table.columns)
if 'gene_length' in rpkm_table.columns:
columns.remove('gene_length')
columns = ['gene_length'] + columns
# sort the columns of the dataframe
annotation_rpkm_df = annotation_rpkm_df.reindex(columns=columns)
annotation_rpkm_df.to_csv(sys.stdout, sep='\t')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("rpkm_table")
parser.add_argument("annotation_table")
args = parser.parse_args()
main(args)
| mit |
fyffyt/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
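    # (illustrative aside, not exercised here: np.median([1., np.nan]) evaluates
    # to nan, while np.ma.median(np.ma.masked_invalid([1., np.nan])) gives 1.0)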
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
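            # (both RandomState instances are seeded with the same j, so X and
            # X_true receive the identical permutation and stay row-aligned)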
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
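    # Illustration of that tie-breaking: scipy.stats.mode([1, 1, 2, 2]).mode is
    # array([1]), i.e. the smallest of the tied values wins; that is why the
    # all-unique last column (5, 3, 7) is expected to be imputed with 3 below.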
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
HPI-SWA-Lab/TargetSpecific-ICOOOLPS | resources/benchmark.py | 1 | 2188 | import numpy as np
import matplotlib.pyplot as plt
N = 4
ind = np.arange(N) # the x locations for the groups
width = 0.4 # the width of the bars
fig, ax = plt.subplots()
ax.set_ylim(0,11) # outliers only
#ax2.set_ylim(0,35) # most of the data
#ax.spines['bottom'].set_visible(False)
#ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
#ax.tick_params(labeltop='off') # don't put tick labels at the top
ax.xaxis.tick_bottom()
fig.subplots_adjust(hspace=0.1)
# call-site-specific
noneV = (5.729, 6.966, 7.953, 8.524)
rectsNone = ax.bar(ind, noneV, width, color='w', hatch=' ')
#ax2.bar(ind, noneV, width, color='w')
# call-target-specific uncached
classCached = (2.560, 3.616, 5.357, 6.846)
rectsClassCached = ax.bar(ind+width, classCached, width, color='w', hatch='o')
#ax2.bar(ind+width, classCached, width, color='w', hatch='/')
# call-target-specific cached
#classUncached = (2.634, 3.358, 5.583, 6.838)
#rectsClassUncached = ax.bar(ind+2*width, classUncached, width, color='w', hatch='o')
#ax2.bar(ind+2*width, classUncached, width, color='w', hatch='o')
# add some text for labels, title and axes ticks
#ax2.set_ylabel('Runtime (ms)')
#ax.set_title('Average rendering runtime per frame')
ax.set_ylabel('Runtime (s) / 100.000 invocations')
ax.set_xticks(ind+width+0.14)
ax.set_xticklabels( ('(a) 1 target \n (10 kwargs)', '(b) 2 targets \n (10 kwargs; \n 10 kwargs)', '(c) 2 targets \n (10 kwargs; \n 5 kwargs + rest kwargs)', '(d) 1 target \n (5 kwargs + rest kwargs)') )
#ax2.set_yticks(ax2.get_yticks()[:-1])
ax.set_yticks(ax.get_yticks()[1:])
ax.legend( (rectsNone[0], rectsClassCached[0]), ('call-site-specific', 'call-target-specific') , loc=4)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
if height == 0:
ax.text(rect.get_x()+rect.get_width()/2., height+2, 'n/a',
ha='center', va='bottom', rotation='vertical')
else:
ax.text(rect.get_x()+rect.get_width()/2., height+0.2, '%.2f'%float(height),
ha='center', va='bottom', rotation='vertical')
autolabel(rectsNone)
autolabel(rectsClassCached)
plt.show() | mit |
manjunaths/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 9 | 8252 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
if __name__ == '__main__':
test.main()
| apache-2.0 |
rhattersley/cartopy | lib/cartopy/examples/waves.py | 4 | 1064 | """
Filled contours
---------------
An example of contourf on manufactured data.
"""
__tags__ = ['Scalar data']
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
def sample_data(shape=(73, 145)):
"""Return ``lons``, ``lats`` and ``data`` of some fake data."""
nlats, nlons = shape
lats = np.linspace(-np.pi / 2, np.pi / 2, nlats)
lons = np.linspace(0, 2 * np.pi, nlons)
lons, lats = np.meshgrid(lons, lats)
wave = 0.75 * (np.sin(2 * lats) ** 8) * np.cos(4 * lons)
mean = 0.5 * np.cos(2 * lats) * ((np.sin(2 * lats)) ** 2 + 2)
lats = np.rad2deg(lats)
lons = np.rad2deg(lons)
data = wave + mean
return lons, lats, data
def main():
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mollweide())
lons, lats, data = sample_data()
ax.contourf(lons, lats, data,
transform=ccrs.PlateCarree(),
cmap='nipy_spectral')
ax.coastlines()
ax.set_global()
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
philrosenfield/padova_tracks | graphics/kippenhahn.py | 1 | 5867 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from .graphics import annotate_plot
from ..config import logT, ycen, xc_cen, xo_cen, age
from ..eep.critical_point import Eep
from ..utils import add_ptcris
def kippenhahn(track, col_keys=None, heb_only=True, ptcri=None,
four_tops=False, xscale='linear', between_ptcris=[0, -2],
khd_dict=None, ax=None, norm=None, annotate=False,
legend=False, fusion=True, convection=True):
pinds = add_ptcris(track, between_ptcris)
norm = norm or ''
if heb_only:
# Core HeB:
inds, = np.nonzero((track.data['LY'] > 0) & (track.data.QHE1 == 0))
else:
inds = np.arange(pinds[pinds > 0][0], pinds[pinds > 0][-1])
pinds = add_ptcris(track, between_ptcris)
xdata = track.data[age][inds]
if xscale == 'linear':
# AGE IN Myr
xdata /= 1e6
xlab = r'$\rm{Age (Myr)}$'
elif 'x' in norm:
xdata = xdata/np.max(xdata)
xlab = r'$\rm{fractional Age}$'
else:
xlab = r'$\log \rm{Age (yr)}$'
if four_tops:
track.calc_core_mu()
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True,
figsize=(8, 8))
gs = gridspec.GridSpec(8, 2)
sm_axs = [plt.subplot(gs[i, 0:]) for i in range(4)]
ax = plt.subplot(gs[4:, 0:])
ycols = [logT, '', 'LOG_RHc', 'LOG_Pc']
ycolls = ['$\log T_{eff}$', '$\mu_c$', '$\\rho_c$', '$\log P_c$']
for smax, ycol, ycoll in zip(sm_axs, ycols, ycolls):
if len(ycol) == 0:
ydata = track.muc[inds]
else:
ydata = track.data[ycol][inds]
            smax.plot(xdata, ydata, lw=3, color='black', label=ycoll)
smax.set_ylabel('$%s$' % ycoll)
smax.set_ylim(np.min(ydata), np.max(ydata))
smax.yaxis.set_major_locator(MaxNLocator(4))
smax.xaxis.set_major_formatter(NullFormatter())
axs = np.concatenate([[ax], sm_axs])
else:
if ax is None:
fig, ax = plt.subplots(figsize=(8, 8))
axs = [ax]
ax.set_xscale(xscale)
# discontinuities in conv...
p1 = np.argmin((np.diff(track.data.CF1[inds])))
p2 = np.argmax(np.diff(track.data.CF1[inds]))
# convections
fbkw = {'edgecolor': 'none', 'alpha': 0.4, 'zorder': 1}
conv_kw = fbkw.copy()
conv_kw['color'] = 'grey'
if convection:
ax.fill_between(xdata[:p1],
track.data.CI1[inds[:p1]],
track.data.CF1[inds[:p1]],
where=track.data.CF1[inds[:p1]] > 0.2,
**conv_kw)
ax.fill_between(xdata[p2:],
track.data.CI1[inds[p2:]],
track.data.CF1[inds[p2:]],
where=track.data.CF1[inds[p2:]] < 0.2,
**conv_kw)
ax.fill_between(xdata,
track.data.CI2[inds],
track.data.CF2[inds],
**conv_kw)
if fusion:
ax.fill_between(xdata,
track.data.QH1[inds],
track.data.QH2[inds],
color='navy', label=r'$H$', **fbkw)
ax.fill_between(xdata,
track.data.QHE1[inds],
track.data.QHE2[inds],
color='darkred', label=r'$^4He$', **fbkw)
zorder = 100
if khd_dict is None:
khd_dict = {xc_cen: 'darkgreen',
xo_cen: 'purple',
ycen: 'orange',
'LX': 'navy',
'LY': 'darkred',
'CONV': 'black'}
# white underneath
[ax.plot(xdata, track.data[column][inds], lw=5, color='white')
for column in list(khd_dict.keys())]
zorder += 10
for col, color in list(khd_dict.items()):
ax.plot(xdata, track.data[col][inds], ls=plot_linestyles(col),
lw=3, color=color, label=plot_labels(col), zorder=zorder)
zorder += 10
ixmax = p1 + np.argmax(track.data[logT][inds[p1:]])
if legend:
ax.legend(frameon=False, loc=0)
ax.set_ylim(0, 1)
ax.set_xlabel(xlab, fontsize=18)
ax.set_ylabel('$m/M\ or\ f/f_{tot}$', fontsize=18)
if annotate:
ptcri_names = \
Eep().eep_list[between_ptcris[0]: between_ptcris[1] + 1]
        annotate_plot(track, ax, '', '', xdata=xdata, ydata=xdata,
                      ptcri_names=ptcri_names, khd=True, inds=inds,
                      lw=2)
# [a.set_xlim(xdata[pinds[0]], xdata[pinds[-1]]) for a in axs]
# for a in axs:
# a.set_xlim(xdata[0], xdata[-1])
# ylim = a.get_ylim()
# [a.vlines(xdata[i], *ylim, color='grey', lw=2)
# for i in [p1, itmax]]
# a.set_ylim(ylim)
return axs
def plot_labels(column):
if column == xc_cen:
lab = '$^{12}C$'
elif column == xo_cen:
lab = '$^{16}O$'
elif column == 'CONV':
lab = r'$\rm{core}$'
elif 'CEN' in column.upper():
lab = '$%s$' % column.upper().replace('CEN', '_c')
elif 'L' in column and len(column) == 2:
lab = '$%s$' % '_'.join(column)
else:
print(('%s label format not supported' % column))
lab = column
return lab
def plot_linestyles(column):
if column == xc_cen:
ls = '-'
elif column == xo_cen:
ls = '-'
elif column == 'CONV':
ls = '-'
elif 'CEN' in column.upper():
ls = '-'
elif 'L' in column and len(column) == 2:
ls = '--'
else:
print(('%s line_style format not supported' % column))
ls = '-'
return ls
| mit |
mayblue9/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
mirams/PyHillFit | python/PyHillTemp.py | 1 | 11388 | import doseresponse as dr
import argparse
import numpy as np
import sys
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.stats as st
import itertools as it
import multiprocessing as mp
import time
#import warnings
#warnings.filterwarnings("error")
seed = 1
npr.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--iterations", type=int, help="number of MCMC iterations",default=500000)
parser.add_argument("-t", "--thinning", type=int, help="how often to thin the MCMC, i.e. save every t-th iteration",default=5)
parser.add_argument("-b", "--burn-in-fraction", type=int, help="given N saved MCMC iterations, discard the first N/b as burn-in",default=4)
parser.add_argument("-a", "--all", action='store_true', help='run hierarchical MCMC on all drugs and channels', default=False)
parser.add_argument("-nc", "--num-cores", type=int, help="number of cores to parallelise drug/channel combinations",default=1)
parser.add_argument("-Ne", "--num_expts", type=int, help="how many experiments to fit to", default=0)
parser.add_argument("--num-APs", type=int, help="how many (alpha,mu) samples to take for AP simulations", default=500)
parser.add_argument("--single", action='store_true', help="run single-level MCMC algorithm",default=True)
parser.add_argument("--hierarchical", action='store_true', help="run hierarchical MCMC algorithm",default=False)
parser.add_argument("--fix-hill", action='store_true', help="fix Hill=1 through fitting and MCMC",default=False)
parser.add_argument("-bfo", "--best-fit-only", action='store_true', help="only do CMA-ES best fit, then quit",default=False)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("--data-file", type=str, help="csv file from which to read in data, in same format as provided crumb_data.csv", required=True)
requiredNamed.add_argument("-m", "--model", type=int, help="For non-hierarchical (put anything for hierarchical):1. fix Hill=1; 2. vary Hill", required=True)
requiredNamed.add_argument("-d", "--drug", type=int, help="drug index", required=True)
requiredNamed.add_argument("-c", "--channel", type=int, help="channel index", required=True)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
dr.define_model(args.model)
num_params = dr.num_params
dr.setup(args.data_file)
#drugs_to_run, channels_to_run = dr.list_drug_channel_options(args.all)
drugs_to_run = [dr.drugs[args.drug]]
channels_to_run = [dr.channels[args.channel]]
def do_mcmc(temperature):#, theta0):
print "Starting chain"
print "\nnum_params: {}\n".format(num_params)
#theta_cur = np.copy(theta0)
theta_cur = np.ones(num_params)
print "theta_cur:", theta_cur
log_target_cur = dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, theta_cur, temperature, pi_bit)
print "log_target_cur:", log_target_cur
total_iterations = args.iterations
thinning = args.thinning
num_saved = total_iterations / thinning + 1
burn = num_saved / args.burn_in_fraction
chain = np.zeros((num_saved, num_params+1))
chain[0, :] = np.concatenate((theta_cur, [log_target_cur]))
loga = 0.
acceptance = 0.
mean_estimate = np.copy(theta_cur)
cov_estimate = np.eye(num_params)
status_when = 5000
adapt_when = 1000*num_params
t = 1
s = 1
while t <= total_iterations:
theta_star = npr.multivariate_normal(theta_cur, np.exp(loga)*cov_estimate)
"""try:
theta_star = npr.multivariate_normal(theta_cur, np.exp(loga)*cov_estimate)
except Warning as e:
print str(e)
print "Iteration:", t
print "temperature:", temperature
print "theta_cur:", theta_cur
print "loga:", loga
print "cov_estimate:", cov_estimate
sys.exit()"""
log_target_star = dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, theta_star, temperature, pi_bit)
u = npr.rand()
if np.log(u) < log_target_star - log_target_cur:
accepted = 1
theta_cur = theta_star
log_target_cur = log_target_star
else:
accepted = 0
acceptance = (t-1.)/t * acceptance + 1./t * accepted
if t % thinning == 0:
chain[t/thinning,:] = np.concatenate((theta_cur, [log_target_cur]))
if t % status_when == 0:
#pass
print t/status_when, "/", total_iterations/status_when
print "acceptance =", acceptance
if t == adapt_when:
mean_estimate = np.copy(theta_cur)
if t > adapt_when:
gamma_s = 1./(s+1.)**0.6
temp_covariance_bit = np.array([theta_cur-mean_estimate])
cov_estimate = (1-gamma_s) * cov_estimate + gamma_s * np.dot(np.transpose(temp_covariance_bit),temp_covariance_bit)
mean_estimate = (1-gamma_s) * mean_estimate + gamma_s * theta_cur
loga += gamma_s*(accepted-0.25)
s += 1
t += 1
# discard burn-in before saving chain, just to save space mostly
return chain[burn:, :]
for drug,channel in it.product(drugs_to_run, channels_to_run):
num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug, channel)
concs = np.array([])
responses = np.array([])
for i in xrange(num_expts):
concs = np.concatenate((concs, experiments[i][:, 0]))
responses = np.concatenate((responses, experiments[i][:, 1]))
where_r_0 = responses==0
where_r_100 = responses==100
where_r_other = (0<responses) & (responses<100)
print "where_r_0:", where_r_0
print "where_r_100:", where_r_100
print "where_r_other:", where_r_other
pi_bit = dr.compute_pi_bit_of_log_likelihood(where_r_other)
#model = 2 #int(sys.argv[1])
temperatures = (np.arange(dr.n+1.)/dr.n)**dr.c
print "\nDoing temperatures: {}\n".format(temperatures)
start = time.time()
if args.num_cores>1:
pool = mp.Pool(args.num_cores)
chains = pool.map_async(do_mcmc,temperatures).get(99999)
pool.close()
pool.join()
else:
chains = [do_mcmc(t) for t in temperatures]
mcmc_time = time.time()-start
print "\nMCMC time: {} s\n".format(int(mcmc_time))
for i, temperature in enumerate(temperatures):
drug,channel,chain_file,images_dir = dr.nonhierarchical_chain_file_and_figs_dir(args.model, drug, channel, temperature)
print "chain_file:", chain_file
chain = chains[i]
np.savetxt(chain_file, chain)
saved_iterations, num_params_plus_one = chain.shape
figs = []
axs = []
# plot all marginal posterior distributions
for i in range(num_params):
figs.append(plt.figure())
axs.append([])
axs[i].append(figs[i].add_subplot(211))
axs[i][0].hist(chain[:,i], bins=40, normed=True, color='blue', edgecolor='blue')
axs[i][0].legend()
axs[i][0].set_title("MCMC marginal distributions")
axs[i][0].set_ylabel("Normalised frequency")
axs[i][0].grid()
plt.setp(axs[i][0].get_xticklabels(), visible=False)
axs[i].append(figs[i].add_subplot(212,sharex=axs[i][0]))
axs[i][1].plot(chain[:,i],range(saved_iterations))
axs[i][1].invert_yaxis()
axs[i][1].set_xlabel(dr.labels[i])
axs[i][1].set_ylabel('Saved MCMC iteration')
axs[i][1].grid()
figs[i].tight_layout()
figs[i].savefig(images_dir+'{}_{}_{}_marginal.png'.format(drug,channel,dr.file_labels[i]))
plt.close()
# plot log-target path
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
ax3.plot(range(saved_iterations), chain[:,-1])
ax3.set_xlabel('MCMC iteration')
ax3.set_ylabel('log-target')
ax3.grid()
fig2.tight_layout()
fig2.savefig(images_dir+'log_target.png')
plt.close()
# plot scatterplot matrix of posterior(s)
colormin, colormax = 1e9,0
norm = matplotlib.colors.Normalize(vmin=5,vmax=10)
hidden_labels = []
count = 0
# there's probably a better way to do this
# I plot all the histograms to normalize the colours, in an attempt to give a better comparison between the pairwise plots
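    # (the first pass only records the global min/max bin counts across panels;
    # the shared Normalize built from them is then used when every panel is
    # redrawn on the second pass, so all 2-D histograms share one colour scale)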
while count < 2:
axes = {}
matrix_fig = plt.figure(figsize=(3*num_params,3*num_params))
for i in range(num_params):
for j in range(i+1):
ij = str(i)+str(j)
subplot_position = num_params*i+j+1
if i==j:
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position)
axes[ij].hist(chain[:,i],bins=50,normed=True,color='blue', edgecolor='blue')
elif j==0: # this column shares x-axis with top-left
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position,sharex=axes["00"])
counts, xedges, yedges, Image = axes[ij].hist2d(chain[:,j],chain[:,i],cmap='hot_r',bins=50,norm=norm)
maxcounts = np.amax(counts)
if maxcounts > colormax:
colormax = maxcounts
mincounts = np.amin(counts)
if mincounts < colormin:
colormin = mincounts
else:
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position,sharex=axes[str(j)+str(j)],sharey=axes[str(i)+"0"])
counts, xedges, yedges, Image = axes[ij].hist2d(chain[:,j],chain[:,i],cmap='hot_r',bins=50,norm=norm)
maxcounts = np.amax(counts)
if maxcounts > colormax:
colormax = maxcounts
mincounts = np.amin(counts)
if mincounts < colormin:
colormin = mincounts
axes[ij].xaxis.grid()
if (i!=j):
axes[ij].yaxis.grid()
if i!=num_params-1:
hidden_labels.append(axes[ij].get_xticklabels())
if j!=0:
hidden_labels.append(axes[ij].get_yticklabels())
if i==j==0:
hidden_labels.append(axes[ij].get_yticklabels())
if i==num_params-1:
axes[str(i)+str(j)].set_xlabel(dr.labels[j])
if j==0 and i>0:
axes[str(i)+str(j)].set_ylabel(dr.labels[i])
plt.xticks(rotation=30)
norm = matplotlib.colors.Normalize(vmin=colormin,vmax=colormax)
count += 1
plt.setp(hidden_labels, visible=False)
matrix_fig.tight_layout()
matrix_fig.savefig(images_dir+"{}_{}_temp_{}_scatterplot_matrix.png".format(drug,channel,temperature))
#matrix_fig.savefig(images_dir+"{}_{}_temp_{}_scatterplot_matrix.pdf".format(drug,channel,temperature))
plt.close()
| bsd-3-clause |