repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
willu47/pyrate | pyrate/repositories/aisdb.py | 1 | 11379 | from pyrate.repositories import sql
import psycopg2
import logging
try:
import pandas as pd
except ImportError:
    logging.warning("No pandas found; dataframe output will be unavailable")
pd = None
EXPORT_COMMANDS = [('status', 'report status of this repository.'),
('create', 'create the repository.'),
('truncate', 'delete all data in this repository.')]
def load(options, readonly=False):
return AISdb(options, readonly)
class AISdb(sql.PgsqlRepository):
double_type = 'double precision'
clean_db_spec = {
'cols': [
('MMSI', 'integer'),
('Time', 'timestamp without time zone'),
('Message_ID', 'integer'),
('Navigational_status', 'integer'),
('SOG', double_type),
('Longitude', double_type),
('Latitude', double_type),
('COG', double_type),
('Heading', double_type),
('IMO', 'integer null'),
('Draught', double_type),
('Destination', 'character varying(255)'),
('Vessel_Name', 'character varying(255)'),
('ETA_month', 'integer'),
('ETA_day', 'integer'),
('ETA_hour', 'integer'),
('ETA_minute', 'integer'),
('source', 'smallint'),
('ID', 'SERIAL PRIMARY KEY')
],
'indices': [
('dt_idx', ['Time']),
('imo_idx', ['IMO']),
('lonlat_idx', ['Longitude', 'Latitude']),
('mmsi_idx', ['MMSI']),
('msg_idx', ['Message_ID']),
('source_idx', ['source']),
('mmsi_imo_idx', ['MMSI','IMO'])
]
}
dirty_db_spec = {
'cols': [
('MMSI', 'bigint'),
('Time', 'timestamp without time zone'),
('Message_ID', 'integer'),
('Navigational_status', 'integer'),
('SOG', double_type),
('Longitude', double_type),
('Latitude', double_type),
('COG', double_type),
('Heading', double_type),
('IMO', 'integer null'),
('Draught', double_type),
('Destination', 'character varying(255)'),
('Vessel_Name', 'character varying(255)'),
('ETA_month', 'integer'),
('ETA_day', 'integer'),
('ETA_hour', 'integer'),
('ETA_minute', 'integer'),
('source', 'smallint'),
('ID', 'SERIAL PRIMARY KEY')
],
'indices': [
('dt_idx', ['Time']),
('imo_idx', ['IMO']),
('lonlat_idx', ['Longitude', 'Latitude']),
('mmsi_idx', ['MMSI']),
('msg_idx', ['Message_ID']),
('source_idx', ['source']),
('mmsi_imo_idx', ['MMSI','IMO'])
]
}
sources_db_spec = {
'cols': [
('ID', 'SERIAL PRIMARY KEY'),
('timestamp', 'timestamp without time zone DEFAULT now()'),
('filename', 'TEXT'),
('ext', 'TEXT'),
('invalid', 'integer'),
('clean', 'integer'),
('dirty', 'integer'),
('source', 'integer')
]
}
imolist_db_spec = {
'cols': [
('mmsi', 'integer NOT NULL'),
('imo', 'integer NULL'),
('first_seen', 'timestamp without time zone'),
('last_seen', 'timestamp without time zone')
],
'constraint': ['CONSTRAINT imo_list_key UNIQUE (mmsi, imo)']
}
clean_imo_list = {
'cols': imolist_db_spec['cols'],
'constraint': ['CONSTRAINT imo_list_pkey PRIMARY KEY (mmsi, imo)']
}
action_log_spec = {
'cols': [
('timestamp', 'timestamp without time zone DEFAULT now()'),
('action', 'TEXT'),
('mmsi', 'integer NOT NULL'),
('ts_from', 'timestamp without time zone'),
('ts_to', 'timestamp without time zone'),
('count', 'integer NULL')
],
'indices': [
('ts_idx', ['timestamp']),
('action_idx', ['action']),
('mmsi_idx', ['mmsi'])
],
'constraint': ['CONSTRAINT action_log_pkey PRIMARY KEY (timestamp, action, mmsi)']
}
def __init__(self, options, readonly=False):
super(AISdb, self).__init__(options, readonly)
self.clean = sql.Table(self, 'ais_clean', self.clean_db_spec['cols'],
self.clean_db_spec['indices'])
self.dirty = sql.Table(self, 'ais_dirty', self.dirty_db_spec['cols'],
self.dirty_db_spec['indices'])
self.sources = sql.Table(self, 'ais_sources', self.sources_db_spec['cols'])
self.imolist = sql.Table(self, 'imo_list', self.imolist_db_spec['cols'],
constraint=self.imolist_db_spec['constraint'])
self.extended = AISExtendedTable(self)
self.clean_imolist = sql.Table(self, 'imo_list_clean', self.clean_imo_list['cols'], constraint=self.clean_imo_list['constraint'])
self.action_log = sql.Table(self, 'action_log', self.action_log_spec['cols'], self.action_log_spec['indices'], constraint=self.action_log_spec['constraint'])
self.tables = [self.clean, self.dirty, self.sources, self.imolist, self.extended, self.clean_imolist, self.action_log]
def status(self):
print("Status of PGSql database "+ self.db +":")
for tb in self.tables:
s = tb.status()
if s >= 0:
print("Table {}: {} rows.".format(tb.get_name(), s))
else:
print("Table {}: not yet created.".format(tb.get_name()))
def create(self):
"""Create the tables for the AIS data."""
for tb in self.tables:
tb.create()
def truncate(self):
"""Delete all data in the AIS table."""
for tb in self.tables:
tb.truncate()
def ship_info(self, imo):
with self.conn.cursor() as cur:
cur.execute("select vessel_name, MIN(time), MAX(time) from ais_clean where message_id = 5 and imo = %s GROUP BY vessel_name", [imo])
for row in cur:
print("Vessel: {} ({} - {})".format(*row))
cur.execute("select mmsi, first_seen, last_seen from imo_list where imo = %s", [imo])
for row in cur:
print("MMSI = {} ({} - {})".format(*row))
def get_messages_for_vessel(self, imo, from_ts=None, to_ts=None, use_clean_db=False, as_df=False):
if use_clean_db:
imo_list = self.imolist
else:
imo_list = self.clean_imolist
where = ["imo = {}"]
params = [imo]
#Amended EOK - no time field in this table
# if not from_ts is None:
# where.append("time >= {}")
# params.append(from_ts)
# if not to_ts is None:
# where.append("time <= {}")
# params.append(to_ts)
with self.conn.cursor() as cur:
cur.execute("select mmsi, first_seen, last_seen from {} where {}".format(imo_list.name, ' AND '.join(where)).format(*params))
msg_stream = None
# get data for each of this ship's mmsi numbers, and concat
for mmsi, first, last in cur:
stream = self.get_message_stream(mmsi, from_ts=first, to_ts=last, use_clean_db=use_clean_db, as_df=as_df)
                if msg_stream is None:
                    msg_stream = stream
                elif as_df:
                    # dataframes must be concatenated, not added element-wise
                    msg_stream = pd.concat([msg_stream, stream])
                else:
                    msg_stream = msg_stream + stream
return msg_stream
def get_message_stream(self, mmsi, from_ts=None, to_ts=None, use_clean_db=False, as_df=False):
"""Gets the stream of messages for the given mmsi, ordered by timestamp ascending"""
# construct db query
if use_clean_db:
db = self.clean
else:
db = self.extended
where = ["mmsi = %s"]
params = [mmsi]
if not from_ts is None:
where.append("time >= %s")
params.append(from_ts)
if not to_ts is None:
where.append("time <= %s")
params.append(to_ts)
cols_list = ','.join([c[0].lower() for c in db.cols])
where_clause = ' AND '.join(where)
sql = "SELECT {} FROM {} WHERE {} ORDER BY time ASC".format(cols_list,
db.get_name(), where_clause)
if as_df:
if pd is None:
raise RuntimeError("Pandas not found, cannot create dataframe")
# create pandas dataframe
with self.conn.cursor() as cur:
full_sql = cur.mogrify(sql, params).decode('ascii')
return pd.read_sql(full_sql, self.conn, index_col='time', parse_dates=['time'])
else:
with self.conn.cursor() as cur:
cur.execute(sql, params)
msg_stream = []
# convert tuples from db cursor into dicts
for row in cur:
message = {}
for i, col in enumerate(db.cols):
message[col[0]] = row[i]
msg_stream.append(message)
return msg_stream
class AISExtendedTable(sql.Table):
def __init__(self, db):
super(AISExtendedTable, self).__init__(db, 'ais_extended',
AISdb.clean_db_spec['cols'] + [('location', 'geography(POINT, 4326)')],
AISdb.clean_db_spec['indices'])
def create(self):
with self.db.conn.cursor() as cur:
cur.execute("CREATE EXTENSION IF NOT EXISTS postgis")
super(AISExtendedTable, self).create()
with self.db.conn.cursor() as cur:
# trigger for GIS location generation
try:
cur.execute("""CREATE OR REPLACE FUNCTION location_insert() RETURNS trigger AS '
BEGIN
NEW."location" := ST_SetSRID(ST_MakePoint(NEW.longitude, NEW.latitude),4326);
RETURN NEW;
END;
' LANGUAGE plpgsql;
CREATE TRIGGER {0}_gis_insert
BEFORE INSERT OR UPDATE ON {0} FOR EACH ROW EXECUTE PROCEDURE location_insert();
""".format(self.name))
except psycopg2.ProgrammingError:
logging.info("{}_gis_insert already exists".format(self.name))
self.db.conn.rollback()
self.db.conn.commit()
def create_indices(self):
with self.db.conn.cursor() as cur:
idxn = self.name.lower() + "_location_idx"
try:
logging.info("CREATING GIST INDEX "+ idxn + " on table "+ self.name)
cur.execute("CREATE INDEX \""+ idxn +"\" ON \"" + self.name +"\" USING GIST(\"location\")")
except psycopg2.ProgrammingError:
logging.info("Index "+ idxn +" already exists")
self.db.conn.rollback()
super(AISExtendedTable, self).create_indices()
def drop_indices(self):
with self.db.conn.cursor() as cur:
tbl = self.name
idxn = tbl.lower() + "_location_idx"
logging.info("Dropping index: "+ idxn + " on table "+ tbl)
cur.execute("DROP INDEX IF EXISTS \""+ idxn +"\"")
super(AISExtendedTable, self).drop_indices()
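# A minimal usage sketch of this repository class, assuming sql.PgsqlRepository
# accepts a dict of connection options; the option keys and the IMO number below
# are placeholders rather than values taken from this module.
if __name__ == '__main__':
    options = {'host': 'localhost', 'db': 'ais', 'user': 'ais', 'pass': 'secret'}
    aisdb = load(options, readonly=True)
    # print row counts (or "not yet created") for every table in the repository
    aisdb.status()
    # pull the cleaned message stream for one vessel, as a dataframe if pandas is present
    messages = aisdb.get_messages_for_vessel(9999999, use_clean_db=True,
                                             as_df=(pd is not None))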
| mit |
varun-rajan/python-modules | lineplot.py | 1 | 3110 | import matplotlib as mpl
import matplotlib.pyplot as plt
import mydictionaries as Mdict
colors = ['k','r','b','g','m','c','y']
linestyles = ['-','--','-.',':']
def linePlot(data,linedict=None,scale=[1,1],xlabel=None,ylabel=None,axis=None,legendlabels=None,legendlocation=2,fontsize1=21,fontsize2=18,ticksize=[8,2],figuresize=[8,6],font='Arial',fignum=1):
plt.figure(fignum,figsize=figuresize)
plt.clf()
preProcessPlot(font,figuresize)
if linedict is None:
linedict = lineDictStandard()
for i, datacurr in enumerate(data):
linedictcurr = Mdict.writeNewDict(linedict,i)
if len(datacurr) == 2: # x and y specified individually
line, = plt.plot(scale[0]*datacurr[0],scale[1]*datacurr[1],**linedictcurr)
else: # in single array
line, = plt.plot(scale[0]*datacurr[:,0],scale[1]*datacurr[:,1],**linedictcurr)
if legendlabels is not None:
line.set_label(legendlabels[i])
postProcessPlot(xlabel,ylabel,axis,legendlabels,legendlocation,fontsize1,fontsize2,ticksize)
def preProcessPlot(font,figuresize):
mpl.rcParams['font.sans-serif'] = font
mpl.rcParams['pdf.fonttype'] = 42
def postProcessPlot(xlabel,ylabel,axis,legendlabels,legendlocation,fontsize1,fontsize2,ticksize):
if xlabel is not None:
plt.xlabel(xlabel,fontsize=fontsize1)
if ylabel is not None:
plt.ylabel(ylabel,fontsize=fontsize1)
if axis is not None:
plt.axis(axis)
plt.legend(loc=legendlocation,fontsize=fontsize2)
plt.tick_params(axis='both',labelsize=fontsize2,width=ticksize[1],length=ticksize[0])
plt.tight_layout()
plt.show()
def genLabels(numvec,label):
return [label + ' = ' + str(num) for num in numvec]
def alternatingList(stringlist,n,nuniques):
return stringlist[0:nuniques]*(n//nuniques)
def alternatingList2(stringlist,n,nuniques):
listfinal = []
for i in range(nuniques):
listfinal = listfinal + stringlist[i:i+1]*(n//nuniques)
return listfinal
def lineDictStandard(color=colors*10,linestyle=linestyles*10,marker='',linewidth=2.5,markersize=10): # every line is a different color and linetype, no markers
keys = ['color','linestyle','marker','linewidth','markersize']
values = [color,linestyle,marker,linewidth,markersize]
return dict(zip(keys,values))
def lineDictAlt1(n,ncolors): # cycle through colors and linestyles, type 1, no markers
# e.g. color = ['k','k','r','r','b','b'], linestyle = ['-','--','-','--','-','--']
linedict = lineDictStandard()
linedict['color'] = alternatingList2(linedict['color'],n,ncolors)
linedict['linestyle'] = alternatingList(linedict['linestyle'],n,n//ncolors)
return linedict
def lineDictAlt2(n,ncolors): # cycle through colors and linestyles, type 2, no markers
# e.g. color = ['k','r','b','k','r','b'], linestyle = ['-','-','-','--','--','--']
linedict = lineDictStandard()
linedict['color'] = alternatingList(linedict['color'],n,ncolors)
linedict['linestyle'] = alternatingList2(linedict['linestyle'],n,n//ncolors)
return linedict
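# A minimal usage sketch showing how the helpers above fit together, assuming
# mydictionaries.writeNewDict selects the i-th entry of each per-line list; the
# data below is synthetic and only illustrates the calling convention.
if __name__ == '__main__':
    import numpy as np
    x = np.linspace(0., 2., 50)
    data = [[x, x**2], [x, x**3]]              # each curve given as [x, y]
    labels = genLabels([2, 3], 'exponent')     # ['exponent = 2', 'exponent = 3']
    linedict = lineDictAlt2(len(data), 2)      # colors ['k','r'], linestyles ['-','-']
    linePlot(data, linedict=linedict, xlabel='x', ylabel='y', legendlabels=labels)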
| gpl-2.0 |
ssaeger/scikit-learn | sklearn/covariance/tests/test_covariance.py | 79 | 12193 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
# A simple implementation of the formulas from Ledoit & Wolf
# The computation below achieves the following computations of the
# "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
# Large-Dimensional Covariance Matrices"
# beta and delta are given in the beginning of section 3.2
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=False)
mu = np.trace(emp_cov) / n_features
delta_ = emp_cov.copy()
delta_.flat[::n_features + 1] -= mu
delta = (delta_ ** 2).sum() / n_features
X2 = X ** 2
beta_ = 1. / (n_features * n_samples) \
* np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
beta = min(beta_, delta)
shrinkage = beta / delta
return shrinkage
def test_ledoit_wolf_small():
# Compare our blocked implementation to the naive implementation
X_small = X[:, :4]
lw = LedoitWolf()
lw.fit(X_small)
shrinkage_ = lw.shrinkage_
assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
CforED/Machine-Learning | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
gotomypc/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
terkkila/scikit-learn | sklearn/utils/tests/test_multiclass.py | 72 | 15350 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
daleloogn/singerID-BTechProject-neuralnet | testImage.py | 1 | 1886 | import os
import sys
from numpy import *
from scipy import io
from scipy import ndimage
import matplotlib.pyplot as plt
from pybrain.structure import *
from pybrain.datasets import SupervisedDataSet
from pybrain.utilities import percentError
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.xml.networkreader import NetworkReader
from PIL import Image
def plotData(image):
'''plots the input data '''
# the image matrix will have to be transposed to be viewed correcty
# cmap shows the color map
plt.imshow(image.T, cmap='Greys')
plt.show()
# load data
data = io.loadmat('database/num4040.mat')
size = (40, 40)
X = data['X']
Y = data['Y']
# read test image
im = Image.open("testImages/01.png")
# convert to numpy array
if(len(shape(im)) == 3):
imA = asarray(im, dtype="float")[:,:,1]
else:
imA = asarray(im, dtype="float")
# transform pixel values from 0 to 1 and invert and convert to PIL image
imA = (imA - amin(imA)) / (amax(imA) - amin(imA))
imA = 1 - imA
#im1 = Image.fromarray(imA)
# obtain bounding box for image and crop
im1 = asarray(imA, dtype="float")
im1 = ndimage.grey_dilation(im1, size=(25,25))
im1 = Image.fromarray(im1)
box = (im1).getbbox()
im2 = im1.crop(box)
# resize this cropped bounding box, convert to numpy array
#im2 = asarray(im2, dtype="float")
#im21 = asarray(im2, dtype="float")
#im2 = ndimage.grey_dilation(im21, size=(7,7))
im3 = im2.resize(size)
im3 = asarray(im3, dtype="float")
#im2[im2 > .5] = 1
#im2[im2 < .5] = 0
im3 = 1 - im3.T
im3 = uint8(im3)
plotData(im3)
#im = im / amax(im)
#im[im >= .5] = 1
#im[im < .5] = 0
X1 = im3.reshape((X.shape[1]))
#X1 = reshape(X1, (len(X1)))
net = NetworkReader.readFrom('solutions/netTrain4040.xml')
prediction = net.activate(X1)
net.activate(X1)
p = argmax(prediction, axis=0)
print(prediction)
print("predicted output is \t" + str(p))
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardSoftShear/Shear_Zone_Length/SZ_h_1e3/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
fabioticconi/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
zhaochl/python-utils | agrith_util/k-means/k-means.py | 1 | 2886 | #!/usr/bin/env python
# coding=utf-8
#################################################
# kmeans: k-means cluster
# Author : zouxy
# Date : 2013-12-25
# HomePage : http://blog.csdn.net/zouxy09
# Email : [email protected]
#################################################
from numpy import *
import time
import matplotlib.pyplot as plt
# calculate Euclidean distance
def euclDistance(vector1, vector2):
return sqrt(sum(power(vector2 - vector1, 2)))
# init centroids with random samples
def initCentroids(dataSet, k):
numSamples, dim = dataSet.shape
centroids = zeros((k, dim))
for i in range(k):
index = int(random.uniform(0, numSamples))
centroids[i, :] = dataSet[index, :]
return centroids
# k-means cluster
def kmeans(dataSet, k):
numSamples = dataSet.shape[0]
# first column stores which cluster this sample belongs to,
# second column stores the error between this sample and its centroid
clusterAssment = mat(zeros((numSamples, 2)))
clusterChanged = True
## step 1: init centroids
centroids = initCentroids(dataSet, k)
while clusterChanged:
clusterChanged = False
## for each sample
for i in xrange(numSamples):
minDist = 100000.0
minIndex = 0
## for each centroid
## step 2: find the centroid who is closest
for j in range(k):
distance = euclDistance(centroids[j, :], dataSet[i, :])
if distance < minDist:
minDist = distance
minIndex = j
## step 3: update its cluster
if clusterAssment[i, 0] != minIndex:
clusterChanged = True
clusterAssment[i, :] = minIndex, minDist**2
## step 4: update centroids
for j in range(k):
pointsInCluster = dataSet[nonzero(clusterAssment[:, 0].A == j)[0]]
centroids[j, :] = mean(pointsInCluster, axis = 0)
print 'Congratulations, cluster complete!'
return centroids, clusterAssment
# show your cluster only available with 2-D data
def showCluster(dataSet, k, centroids, clusterAssment):
numSamples, dim = dataSet.shape
if dim != 2:
print "Sorry! I can not draw because the dimension of your data is not 2!"
return 1
mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
if k > len(mark):
print "Sorry! Your k is too large! please contact Zouxy"
return 1
# draw all samples
for i in xrange(numSamples):
markIndex = int(clusterAssment[i, 0])
plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])
mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
# draw the centroids
for i in range(k):
plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)
plt.show()
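# A minimal usage sketch: cluster four synthetic 2-D blobs and plot the result.
# The data below is randomly generated purely to illustrate the call sequence.
if __name__ == '__main__':
    offsets = [(0, 0), (5, 5), (0, 5), (5, 0)]
    dataSet = mat(vstack([random.randn(50, 2) + offset for offset in offsets]))
    k = 4
    centroids, clusterAssment = kmeans(dataSet, k)
    showCluster(dataSet, k, centroids, clusterAssment)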
| apache-2.0 |
catherinezucker/radfil | radfil/profile_tools.py | 1 | 7704 | import numpy as np
import networkx as nx
from sklearn.neighbors import NearestNeighbors
from skimage import morphology
import scipy
import matplotlib.pyplot as plt
import math
from scipy.interpolate import RegularGridInterpolator
from scipy.spatial import distance
def curveorder(x,y):
"""
Sort pixels that make up the filament spine by the order in which they appear along the curve
Code taken from http://stackoverflow.com/questions/37742358/sorting-points-to-form-a-continuous-line
Parameters:
x: numpy.ndarray
A 1d array specifying the x coordinates of the pixels defining your filament spine
y: numpy.ndarray
A 1d array specifying the y coordinates of the pixels defining your filament spine
Returns:
xx, yy: the sorted x and y arrays given above
"""
# make the point list (N x 2)
pts=np.vstack((x,y)).T
# initiate the NN2 and the network graph
clf = NearestNeighbors(2).fit(pts)
G = clf.kneighbors_graph()
T = nx.from_scipy_sparse_matrix(G)
# candidate paths based on the network graph
paths = [list(nx.dfs_preorder_nodes(T, i)) for i in range(len(pts))]
## find the path with the lowest cost (distance) among the candidates
minidx = np.argmin([np.sum(np.diagonal(distance.cdist(pts[path], pts[path]), offset = 1)) for path in paths])
opt_order = paths[minidx]
## permute the head and the tail to find the correct order
### permutation
opt_order_swaphead = list(opt_order)
opt_order_swaphead[0], opt_order_swaphead[1] = opt_order_swaphead[1], opt_order_swaphead[0]
opt_order_swaptail = list(opt_order)
opt_order_swaptail[-1], opt_order_swaptail[-2] = opt_order_swaptail[-2], opt_order_swaptail[-1]
### find the correct order among the original and the two permuted
paths_opt = [opt_order, opt_order_swaphead, opt_order_swaptail]
minidx_opt = np.argmin([np.sum(np.diagonal(distance.cdist(pts[path], pts[path]), offset = 1)) for path in paths_opt])
opt_order_final = paths_opt[minidx_opt]
# return the ordered coordinates
xx = x[opt_order_final]
yy = y[opt_order_final]
## make it always go in the increasing y direction
if yy[-1] < yy[0]:
yy = yy[::-1]
xx = xx[::-1]
return(xx,yy)
def profile_builder(radobj, point, derivative, shift = True, fold = False):
'''
Build the profile using array manipulation, instead of looping.
Parameters:
radobj:
The object containing the image, the mask, and the axis for plotting.
point: tuple-like
The x and y pixel coordinates (corresponding to the 1st and the 0th axes) of
the point at the center.
derivative: tuple-like
Thee x and y components of the derivative of the spline at `point`. Used to
derive the profile cut.
shift: boolean
Indicates whether to shift the profile to center at the peak value.
fold: boolean
Indicates whether to fold around the central pixel, so that the final profile
will be a "half profile" with the peak near/at the center (depending on
whether it's shifted).
Returns:
final_dist: 1D numpy.ndarray
The distance array.
image_line: 1D numpy.ndarray
The value array.
'''
# Read the image and the mask
image, mask = radobj.image, radobj.mask
# Read the plotting axis
axis = radobj.ax
# Read the point and double check whether it's inside the mask.
x0, y0 = point
if (not mask[int(round(y0)), int(round(x0))]):
raise ValueError("The point is not in the mask.")
# Create the grid to calculate where the profile cut crosses edges of the
# pixels.
shapex, shapey = image.shape[1], image.shape[0]
edgex, edgey = np.arange(.5, shapex-.5, 1.), np.arange(.5, shapey-.5, 1.)
# Extreme cases when the derivative is (1, 0) or (0, 1)
if (derivative[0] == 0) or (derivative[1] == 0):
if (derivative[0] == 0) and (derivative[1] == 0):
raise ValueError("Both components of the derivative are zero; unable to derive a tangent.")
elif (derivative[0] == 0):
y_edgex = []
edgex = []
x_edgey = np.ones(len(edgey))*x0
elif (derivative[1] == 0):
y_edgex = np.ones(len(edgex))*y0
x_edgey = []
edgey = []
## The regular cases go here: calculate the crossing points of the cut and the grid.
else:
slope = -1./(derivative[1]/derivative[0])
y_edgex = slope*(edgex - x0) + y0
x_edgey = (edgey - y0)/slope + x0
### Mask out points outside the image.
pts_maskx = ((np.round(x_edgey) >= 0.) & (np.round(x_edgey) < shapex))
pts_masky = ((np.round(y_edgex) >= 0.) & (np.round(y_edgex) < shapey))
edgex, edgey = edgex[pts_masky], edgey[pts_maskx]
y_edgex, x_edgey = y_edgex[pts_masky], x_edgey[pts_maskx]
# Sort the points to find the center of each segment inside a single pixel.
## This also deals with when the cut crosses at the 4-corner point(s).
## The sorting is done by sorting the x coordinates
stack = sorted(list(set(zip(np.concatenate([edgex, x_edgey]),\
np.concatenate([y_edgex, edgey])))))
centers = stack[:-1]+.5*np.diff(stack, axis = 0)
## extract the values from the image and the original mask
#setup interpolation
xgrid=np.arange(0.5,radobj.image.shape[1]+0.5,1.0)
ygrid=np.arange(0.5,radobj.image.shape[0]+0.5,1.0)
interpolator = RegularGridInterpolator((xgrid,ygrid),radobj.image.T,bounds_error=False,fill_value=None)
image_line=interpolator(centers)
#image_line = image[np.round(centers[:, 1]).astype(int), np.round(centers[:, 0]).astype(int)]
mask_line = mask[np.round(centers[:, 1]).astype(int), np.round(centers[:, 0]).astype(int)]
#### select the part of the mask that includes the original point
mask_p0 = (np.round(centers[:, 0]).astype(int) == int(round(x0)))&\
(np.round(centers[:, 1]).astype(int) == int(round(y0)))
mask_line = (morphology.label(mask_line) == morphology.label(mask_line)[mask_p0])
# Extract the profile from the image.
## for the points within the original mask; to find the peak
if derivative[1] < 0.:
image_line0 = image_line[mask_line][::-1]
centers = centers[::-1]
mask_line = mask_line[::-1]
mask_p0 = mask_p0[::-1]
else:
image_line0 = image_line[mask_line]
## for the entire map
    if derivative[1] < 0.:
        image_line = image_line[::-1]
# Plot.
peak_finder = centers[mask_line]
## find the end points of the cuts (within the original mask)
start, end = peak_finder[0], peak_finder[-1]
## find the peak here
xpeak, ypeak = peak_finder[image_line0 == np.nanmax(image_line0)][0]
## the peak mask is used to determine where to unfold when shift = True
mask_peak = (np.round(centers[:, 0]).astype(int) == int(round(xpeak)))&\
(np.round(centers[:, 1]).astype(int) == int(round(ypeak)))
## plot the cut
axis.plot([start[0], end[0]], [start[1], end[1]], 'r-', linewidth = 1.,alpha=1)
# Shift.
if shift:
final_dist = np.hypot(centers[:, 0]-xpeak, centers[:, 1]-ypeak)
# unfold
pos0 = np.where(mask_peak)[0][0]
final_dist[:pos0] = -final_dist[:pos0]
else:
final_dist = np.hypot(centers[:, 0]-x0, centers[:, 1]-y0)
# unfold
pos0 = np.where(mask_p0)[0][0]
final_dist[:pos0] = -final_dist[:pos0]
# Fold
if fold:
final_dist = abs(final_dist)
return final_dist, image_line, (xpeak, ypeak), (start, end)
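

# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original code).
# It builds a minimal `radobj`-like object exposing the three attributes that
# profile_builder() reads (image, mask, ax) and cuts a profile through the
# centre of a synthetic Gaussian. The test image, the mask threshold and the
# (1, 1) derivative are assumptions chosen only to exercise the function; the
# module-level imports of the original file (numpy as np, scipy/skimage) are
# assumed to be present.
def _example_profile_builder():
    import types
    import matplotlib.pyplot as plt
    yy_, xx_ = np.mgrid[0:64, 0:64]
    image = np.exp(-((xx_ - 32.)**2 + (yy_ - 32.)**2) / (2. * 8.**2))
    radobj = types.SimpleNamespace(image=image,
                                   mask=(image > 0.1),
                                   ax=plt.subplots()[1])
    dist, values, peak, ends = profile_builder(radobj, (32., 32.), (1., 1.),
                                               shift=True, fold=False)
    return dist, values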
| gpl-3.0 |
WaveBlocks/WaveBlocksND | WaveBlocksND/Plot3D/surfcf.py | 1 | 2375 | r"""The WaveBlocks Project
Function for plotting functions of the type :math:`f:I^2 -> C`
with :math:`|f|` as y-value and :math:`\arg(f)` as color code.
This function makes a 3D surface plot.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2014 R. Bourquin
@license: Modified BSD License
"""
from numpy import linspace, pi, squeeze, ones, real, fmod
from matplotlib.colors import hsv_to_rgb
from mayavi import mlab
def compute_color_map():
"""Compute a default QM colormap which can be used as mayavi/vtk lookup table.
"""
k = linspace(-pi, pi, 256, endpoint=True)
hsv_colors = ones((1, k.shape[0], 3))
hsv_colors[:, :, 0] = 0.5 * fmod(k + 2 * pi, 2 * pi) / pi
return 255 * squeeze(hsv_to_rgb(hsv_colors))
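# Added note (illustrative, not authoritative): the table returned above is a
# (256, 3) RGB array covering phases from -pi to pi; surfcf() below copies it
# into the first three columns of the mayavi lookup table.
# >>> compute_color_map().shape
# (256, 3)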
def surfcf(gridx, gridy, phase, modulus, colormap=None, view=None):
r"""Plot the modulus of a complex valued function :math:`f:R^2 -> C`
together with its phase in a color coded fashion.
:param gridx: The grid nodes along the :math:`x` axis of the real domain :math:`R^2`
:param gridy: The grid nodes along the :math:`y` axis of the real domain :math:`R^2`
:param phase: The phase of the complex domain result f(grid)
:param modulus: The modulus of the complex domain result f(grid)
    :param colormap: The colormap to use; if none is given, the default QM colormap is computed.
    :param view: Optional extent used to clip the plotted surface; if None, no clipping is applied.
"""
if colormap is None:
colormap = compute_color_map()
# The real(.) is necessary just to get an array with dtype real
src = mlab.pipeline.grid_source(real(gridx), real(gridy), real(modulus), scalars=real(phase))
# Clip to given view
if view is not None:
geometry_filter = mlab.pipeline.user_defined(src, filter='GeometryFilter')
geometry_filter.filter.extent_clipping = True
geometry_filter.filter.extent = view
src = mlab.pipeline.user_defined(geometry_filter, filter='CleanPolyData')
# Plot the surface
normals = mlab.pipeline.poly_data_normals(src)
mesh = mlab.pipeline.surface(normals)
# Set the custom color map
mesh.module_manager.scalar_lut_manager.use_default_range = False
mesh.module_manager.scalar_lut_manager.data_range = [-pi, pi]
lut = mesh.module_manager.scalar_lut_manager.lut.table.to_array()
lut[:, 0:3] = colormap.copy()
mesh.module_manager.scalar_lut_manager.lut.table = lut
return mesh
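

def _example_surfcf():
    """Hedged usage sketch (added for illustration; not part of WaveBlocks).

    Evaluates a simple complex function on a grid and hands its modulus and
    phase to surfcf(). The grid size and the sample function exp(i*x*y) are
    assumptions; a running mayavi installation is required, as for surfcf().
    """
    from numpy import mgrid, exp, angle, abs as np_abs
    X, Y = mgrid[-2:2:64j, -2:2:64j]
    Z = exp(1j * X * Y)
    return surfcf(X, Y, angle(Z), np_abs(Z))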
| bsd-3-clause |
gallantlab/pycortex | cortex/svgoverlay.py | 1 | 35417 | import os
import re
import copy
import shlex
import tempfile
import itertools
import numpy as np
import subprocess as sp
from matplotlib.path import Path
from scipy.spatial import cKDTree
from builtins import zip, str
from distutils.version import LooseVersion
from lxml import etree
from lxml.builder import E
from .options import config
from .testing_utils import INKSCAPE_VERSION
svgns = "http://www.w3.org/2000/svg"
inkns = "http://www.inkscape.org/namespaces/inkscape"
sodins = "http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
parser = etree.XMLParser(remove_blank_text=True, huge_tree=True)
cwd = os.path.abspath(os.path.split(__file__)[0])
class SVGOverlay(object):
"""Object to represent all vector graphic overlays (rois, sulci, etc) stored in an svg file
This object facilitates interaction with the information in the overlays.svg files
that exist for each subject in the pycortex database.
Parameters
----------
svgfile : string
svg file to read in. Must be formatted like the overlays.svg files in pycortex's
filestore
    coords : array-like
        (n_vertices, 2) array of flatmap vertex coordinates; normalized to the svg
        canvas and used to build a KD-tree that links text labels to vertices.
overlays_available : list or tuple
list of layers of svg file to extract. If None, extracts all overlay layers
(i.e. all layers that do not contain images)
"""
def __init__(self, svgfile, coords=None, overlays_available=None):
self.svgfile = svgfile
self.overlays_available = overlays_available
self.reload()
if coords is not None:
self.set_coords(coords)
def reload(self):
"""Initial load of data from svgfile
Strips out `data` layer of svg file, saves only layers consisting of vector paths.
"""
self.svg = scrub(self.svgfile, overlays_available=self.overlays_available)
w = float(self.svg.getroot().get("width"))
h = float(self.svg.getroot().get("height"))
self.svgshape = w, h
# Grab relevant layers
self.layers = dict()
for layer in self.svg.getroot().findall("{%s}g"%svgns):
layer = Overlay(self, layer)
self.layers[layer.name] = layer
def set_coords(self, coords):
"""Unclear what this does. James??"""
# Normalize coordinates 0-1
if np.any(coords.max(0) > 1) or np.any(coords.min(0) < 0):
coords -= coords.min(0)
coords /= coords.max(0)
# Renormalize coordinates to shape of svg
self.coords = coords * self.svgshape
# Update of scipy (0.16+) means that cKDTree hangs / takes absurdly long to compute with new default
# balanced_tree=True. Seems only to be true on Mac OS, for whatever reason. Possibly a broken
# C library, unclear. Setting balanced_tree=False seems to resolve the issue, thus going with that for now
# See http://stackoverflow.com/questions/31819778/scipy-spatial-ckdtree-running-slowly
try:
# not compatible with scipy version < 0.16
self.kdt = cKDTree(self.coords, balanced_tree=False)
except:
# Older call signature
self.kdt = cKDTree(self.coords)
for layer in self:
for name in layer.labels.elements:
for element in layer.labels.elements[name]:
x, y = float(element.get("x")), float(element.get("y"))
dist, idx = self.kdt.query((x, self.svgshape[1]-y))
if idx >= len(self.kdt.data):
idx = 0
element.attrib['data-ptidx'] = str(idx)
def __getattr__(self, attr):
return self.layers[attr]
def __dir__(self):
return list(self.layers.keys()) + ['svg', 'svgfile', 'svgshape']
def __repr__(self):
return "<SVGOverlay with layers [%s]>"%(','.join(self.layers.keys()))
def __iter__(self):
return iter(self.layers.values())
def add_layer(self, name):
"""Add a layer to the svgfile on which this object is based
Adds a new layer named `name` to the svgfile by the SVGOverlay object, and
overwrites the original file (incorporating the new layer).
"""
svg = etree.parse(self.svgfile, parser=parser)
layer = _make_layer(svg.getroot(), name)
shapes = _make_layer(layer, "shapes")
shapes.attrib['id'] = "%s_shapes"%name
shapes.attrib['clip-path'] = "url(#edgeclip)"
labels = _make_layer(layer, "labels")
labels.attrib['id'] = "%s_labels"%name
with open(self.svgfile, "wb") as fp:
#try:
fp.write(etree.tostring(svg, pretty_print=True)) # python2.X
#except:
# fp.write(etree.tostring(svg, encoding=str, pretty_print=True)) # python3.X
self.reload()
def toxml(self, pretty=True):
"""Return a string xml version of the SVGOverlay object"""
return etree.tostring(self.svg, pretty_print=pretty)
def get_svg(self, filename=None, layers=['rois'], labels=True, with_ims=None):
"""Returns a new SVG file with images embedded
Parameters
----------
filename : string
File path to which to write new svg
layers : list
List of layer names to show
labels : boolean
Whether labels should be visible or not
with_ims : list
list of images to incorporate into new svg file. The first image
listed will be on the uppermost layer, the last will be lowest.
"""
outsvg = self.svg
if with_ims is not None:
if isinstance(with_ims, (list, tuple)):
with_ims = zip(range(len(with_ims)), with_ims)
datalayer = _make_layer(outsvg.getroot(), "data")
for imnum, im in reversed(list(with_ims)): # need list() with zip for python 3.5 compatibility
imlayer = _make_layer(datalayer, "image_%d" % imnum)
img = E.image(
{"{http://www.w3.org/1999/xlink}href":"data:image/png;base64,%s"%str(im,'utf-8')},
id="image_%d"%imnum, x="0", y="0",
width=str(self.svgshape[0]),
height=str(self.svgshape[1]),
)
imlayer.append(img)
outsvg.getroot().insert(0, imlayer)
for layer in self:
if layer.name in layers:
layer.visible = True
layer.labels.visible = labels
for name_, shape_ in layer.shapes.items():
shape_.visible = True
# Set visibility of labels (by setting text alpha to 0)
# This could be less baroque, but text elements currently
# do not have individually settable visibility / style params
tmp_style = copy.deepcopy(layer.labels.text_style)
tmp_style['fill-opacity'] = '1' if labels else '0'
tmp_style_str = ';'.join(['%s:%s'%(k,v) for k, v in tmp_style.items() if v != 'None'])
for i in range(len(layer.labels.elements[name_])):
layer.labels.elements[name_][i].set('style', tmp_style_str)
else:
layer.visible = False
layer.labels.visible = False
with open(filename, "wb") as outfile:
outfile.write(etree.tostring(outsvg))
print('Saved SVG to: %s'%filename)
def get_texture(self, layer_name, height, name=None, background=None, labels=True,
shape_list=None, **kwargs):
"""Renders a specific layer of this svgobject as a png
Parameters
----------
layer_name : string
Name of layer of svg file to be rendered
height : scalar
Height of image to be generated
name : string
If `background` is specified, provides a name for the background image
        background : str, optional
            Base64-encoded png data embedded as a background image layer during rendering.
labels : boolean
Whether to render labels for paths in the svg file
shape_list : list
list of string names for path/shape elements in this layer to be rendered
(any elements not on this list will be set to invisible, if this list is
provided)
kwargs : keyword arguments
keywords to specify display properties of svg path objects, e.g. {'stroke':'white',
'stroke-width':2} etc. See inkscape help for names for properties. This function
is used by quickflat.py, which provides dictionaries to map between more matplotlib-
like properties (linecolor->stroke, linewidth->stroke-width) for an easier-to-use API.
Returns
-------
image : array
Rendered image of svg layer with specified parameters
Notes
-----
        The bits=32 keyword argument is intentionally not implemented; it did not
        seem necessary to specify png bit depth.
"""
# Give a more informative error in case we don't have inkscape
# installed
if INKSCAPE_VERSION is None:
raise RuntimeError(
"Inkscape doesn't seem to be installed on this system."
"SVGOverlay.get_texture requires inkscape."
"Please make sure that inkscape is installed and that is "
"accessible from the terminal.")
import matplotlib.pyplot as plt
# Set the size of the texture
if background is not None:
img = E.image(
{"{http://www.w3.org/1999/xlink}href":"data:image/png;base64,%s"%background},
id="image_%s"%name, x="0", y="0",
width=str(self.svgshape[0]),
height=str(self.svgshape[1]),
)
self.svg.getroot().insert(0, img)
if height is None:
height = self.svgshape[1]
#label_defaults = _parse_defaults(layer+'_labels')
# separate kwargs starting with "label-"
label_kwargs = {k[6:]:v for k, v in kwargs.items() if k[:6] == "label-"}
kwargs = {k:v for k, v in kwargs.items() if k[:6] != "label-"}
for layer in self:
if layer.name==layer_name:
layer.visible = True
layer.labels.visible = labels
if shape_list is not None:
for name_, shape_ in layer.shapes.items():
shape_.visible = name_ in shape_list
# Set visibility of labels (by setting text alpha to 0)
# This could be less baroque, but text elements currently
# do not have individually settable visibility / style params
tmp_style = copy.deepcopy(layer.labels.text_style)
tmp_style['fill-opacity'] = '1' if shape_.visible else '0'
tmp_style.update(label_kwargs)
tmp_style_str = ';'.join(['%s:%s'%(k,v) for k, v in tmp_style.items() if v != 'None'])
for i in range(len(layer.labels.elements[name_])):
layer.labels.elements[name_][i].set('style', tmp_style_str)
layer.set(**kwargs)
else:
layer.visible = False
layer.labels.visible = False
pngfile = name
if name is None:
png = tempfile.NamedTemporaryFile(suffix=".png")
pngfile = png.name
inkscape_cmd = config.get('dependency_paths', 'inkscape')
if LooseVersion(INKSCAPE_VERSION) < LooseVersion('1.0'):
cmd = "{inkscape_cmd} -z -h {height} -e {outfile} /dev/stdin"
else:
cmd = "{inkscape_cmd} -h {height} --export-filename {outfile} " \
"/dev/stdin"
cmd = cmd.format(inkscape_cmd=inkscape_cmd, height=height, outfile=pngfile)
proc = sp.Popen(shlex.split(cmd), stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = proc.communicate(etree.tostring(self.svg))
# print stderr, except the warning "Format autodetect failed."
if hasattr(stderr, 'decode'):
stderr = stderr.decode()
for line in stderr.split('\n'):
if line != '' and 'Format autodetect failed.' not in line:
print(line)
if background is not None:
self.svg.getroot().remove(img)
if name is None:
png.seek(0)
im = plt.imread(png)
return im
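# Hedged usage sketch (added; not part of pycortex): how SVGOverlay.get_texture
# is typically driven. The overlay path is a placeholder for an overlays.svg
# from the pycortex filestore, and inkscape must be installed, as noted above.
# >>> svgo = SVGOverlay('/path/to/overlays.svg')
# >>> rois = svgo.get_texture('rois', height=1024, labels=True)
# >>> rois.shape      # an RGBA image array read back with matplotlib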
class Overlay(object):
"""Class to represent a single layer of an SVG file
"""
def __init__(self, svgobject, layer):
self.svgobject = svgobject
self.svg = svgobject.svg
self.layer = layer
self.name = layer.attrib['{%s}label'%inkns]
self.layer.attrib['class'] = 'display_layer'
# Check to see if the layer is locked, to see if we need to override the style
locked = '{%s}insensitive'%sodins
self.shapes = dict()
for layer_ in _find_layer(layer, "shapes").findall("{%s}g"%svgns):
override = locked not in layer_.attrib or layer_.attrib[locked] == "false"
shape = Shape(layer_, self.svgobject.svgshape[1], override_style=override)
self.shapes[shape.name] = shape
self.labels = Labels(self)
def __repr__(self):
return "<svg layer with shapes [%s]>"%(','.join(self.shapes.keys()))
def __getitem__(self, name):
return self.shapes[name]
@property
def visible(self):
return 'none' not in self.layer.attrib['style']
@visible.setter
def visible(self, value):
style = "display:inline;" if value else "display:none;"
self.layer.attrib['style'] = style
def set(self, **kwargs):
for shape in list(self.shapes.values()):
shape.set(**kwargs)
def get_mask(self, name):
return self.shapes[name].get_mask(self.svgobject.coords)
def add_shape(self, name, pngdata=None, add_path=True):
"""Adds projected data for defining a new ROI to the saved overlays.svg file in a new layer"""
# self.svg deletes the images -- we want to save those, so let's load it again
svg = etree.parse(self.svgobject.svgfile, parser=parser)
imglayer = _find_layer(svg, "data")
if add_path:
layer = _find_layer(svg, self.name)
_make_layer(_find_layer(layer, "shapes"), name)
# Hide all the other layers in the image
for layer in imglayer.findall(".//{%s}g"%svgns):
layer.attrib["style"] = "display:hidden;"
layer = _make_layer(imglayer, "img_%s"%name)
layer.append(E.image(
{"{http://www.w3.org/1999/xlink}href":"data:image/png;base64,%s"%pngdata},
id="image_%s"%name, x="0", y="0",
width=str(self.svgobject.svgshape[0]),
height=str(self.svgobject.svgshape[1]),
))
with open(self.svgobject.svgfile, "wb") as xml:
#try:
xml.write(etree.tostring(svg, pretty_print=True)) # python2.X
#except:
# xml.write(etree.tostring(svg, encoding=str, pretty_print=True)) # python3.X
class Labels(object):
def __init__(self, overlay):
self.overlay = overlay
self.layer = _find_layer(self.overlay.layer, "labels")
        # This should be layer-specific, and read from different fields in the options.cfg file
        # if possible, falling back to overlay_text if the specific layer defaults don't exist.
# Don't have time to figure out how to get the layer name here (rois, sulci, etc)
self.text_style = dict(config.items("overlay_text"))
text_style = self.text_style.items()
text_style = ';'.join(['%s:%s'%(k,v) for k, v in text_style if v != 'None'])
#generate a list of labels that should be in the layer
self.elements = dict()
for shape in self.overlay.shapes.values():
self.elements[shape.name] = shape.get_labelpos()
# match up existing labels with their respective paths
def close(pt, x, y):
try:
xx, yy = pt[0], pt[1]
except IndexError: # when loading overlay from a dataset pack
xx, yy = float(pt.get('x')), float(pt.get('y'))
return np.sqrt((xx - x)**2 + (yy-y)**2) < 250
for text in self.layer.findall(".//{%s}text"%svgns):
x = float(text.get('x'))
y = float(text.get('y'))
#check this element against all known paths
for name in self.elements.keys():
if text.text == name:
for i, pos in enumerate(self.elements[name]):
if close(pos, x, y):
self.elements[name][i] = text
#add missing elements
self.override = []
for name in self.elements.keys():
for i, pos in enumerate(self.elements[name]):
if isinstance(pos, np.ndarray):
text = etree.SubElement(self.layer, "{%s}text"%svgns)
text.text = name
text.attrib["x"] = str(pos[0])
text.attrib["y"] = str(pos[1])
text.attrib['style'] = text_style
self.elements[name][i] = text
self.override.append(text)
def set(self, override=False, **kwargs):
self.text_style.update(kwargs)
text_style = self.text_style.items()
text_style = ';'.join(['%s:%s'%(k,v) for k, v in text_style if v != 'None'])
labels = self.override
if override:
labels = self.labels.findall(".//{%s}text"%svgns)
for element in labels:
element.attrib['style'] = text_style
@property
def visible(self):
return self.text_style['display'] != "none"
@visible.setter
def visible(self, value):
if value:
self.text_style['display'] = 'inline'
else:
self.text_style['display'] = 'none'
self.set()
class Shape(object):
def __init__(self, layer, height, override_style=True):
self.layer = layer
self.height = height
self.name = layer.attrib['{%s}label'%inkns]
self.paths = layer.findall('{%s}path'%svgns)
#default style
self.style = dict(config.items("overlay_paths"))
locked = '{%s}insensitive'%sodins
if not override_style or locked in layer.attrib:
self._get_style()
self.set()
def _get_style(self):
# populate the style dictionary with the first path that has a style tag
for path in self.paths:
if 'style' in path.attrib:
style = dict(s.split(':') for s in path.attrib['style'].split(";"))
self.style.update(style)
break
def set(self, **kwargs):
self.style.update(**kwargs)
style = ';'.join(['%s:%s'%(k,v) for k, v in self.style.items() if v != "None"])
for path in self.paths:
path.attrib['style'] = style
def get_labelpos(self):
labels = []
for path in self.paths:
pos = _parse_svg_pts(path.attrib['d'])
labels.append(_center_pts(pos))
return labels
def get_mask(self, vts):
"""get list of vertices inside this roi"""
if len(self.splines)==0:
# No splines defined for this (ROI). Wut.
import warnings
warnings.warn("Requested layer in svg file (%s) contains no splines"%self.name)
return []
# Annoying: The paths created are upside-down wrt vertex coordinates. So flip them.
verts_upside_down = copy.copy(vts)
verts_upside_down[:, 1] = self.height - verts_upside_down[:, 1]
verts_in_any_path = [p.contains_points(verts_upside_down) for p in self.splines]
vert_idx_list = np.hstack([np.nonzero(v)[0] for v in verts_in_any_path])
return vert_idx_list
@property
def splines(self):
return [gen_path(p) for p in self.paths]
@property
def visible(self):
return 'none' not in self.layer.attrib['style']
@visible.setter
def visible(self, value):
style = "display:inline;" if value else "display:none;"
self.layer.attrib['style'] = style
###################################################################################
# SVG Helper functions
###################################################################################
def _find_layer_names(svg):
layers = svg.findall("{%s}g[@{%s}label]"%(svgns, inkns))
layer_names = [l.get("{%s}label"%inkns) for l in layers]
return layer_names
def _find_layer(svg, label):
layers = [l for l in svg.findall("{%s}g[@{%s}label]"%(svgns, inkns)) if l.get("{%s}label"%inkns) == label]
if len(layers) < 1:
raise ValueError("Cannot find layer %s"%label)
return layers[0]
def _make_layer(parent, name):
layer = etree.SubElement(parent, "{%s}g"%svgns)
layer.attrib['id'] = name
layer.attrib['style'] = "display:inline;"
layer.attrib["{%s}label"%inkns] = name
layer.attrib["{%s}groupmode"%inkns] = "layer"
return layer
try:
from shapely.geometry import Polygon
def _center_pts(pts):
'''Fancy label position generator, using erosion to get label coordinate'''
min = pts.min(0)
pts -= min
max = pts.max(0)
max[max == 0] = 1
pts /= max
#probably don't need more than 20 points, reduce detail of the polys
if len(pts) > 20:
pts = pts[::len(pts)//20]
try:
if len(pts) < 3:
raise RuntimeError()
poly = Polygon([tuple(p) for p in pts])
last_i = None
for i in np.linspace(0,1,100):
if poly.buffer(-i).is_empty:
if last_i is None:
raise RuntimeError()
a = list(poly.buffer(-last_i).centroid.coords)[0] * max + min
return a
last_i = i
import warnings
warnings.warn("Unable to find zero centroid.")
return list(poly.buffer(-100).centroid.coords)[0] * max + min
except RuntimeError:
return np.nanmean(pts, 0) * max + min
except (ImportError, OSError):
import warnings
warnings.warn("Cannot find shapely, using simple label placement.")
def _center_pts(pts):
return np.nanmean(pts, 0)
def _labelpos(pts):
if pts.ndim < 3:
return _center_pts(pts)
ptm = pts.copy().astype(float)
ptm -= ptm.mean(0)
u, s, v = np.linalg.svd(ptm, full_matrices=False)
sp = np.diag(s)
sp[-1,-1] = 0
try:
x, y = _center_pts(np.dot(ptm, np.dot(v.T, sp))[:,:2])
except Exception as e:
print(e)
sp = np.diag(1./(s+np.finfo(float).eps))
pt = np.dot(np.dot(np.array([x,y,0]), sp), v)
return pt + pts.mean(0)
def _split_multipath(pathstr):
"""Appropriately splits an SVG path with multiple sub-paths.
"""
    # Uppercase M starts an absolute sub-path, lowercase m a relative one.
if not pathstr[0] in ["m","M"]:
raise ValueError("Bad path format: %s" % pathstr)
import re
subpaths = [sp for sp in re.split('[Mm]',pathstr) if len(sp)>0]
headers = re.findall('[Mm]',pathstr)
for subpath,header in zip(subpaths,headers):
# Need further parsing of multi-path strings? perhaps no.
yield (header + subpath).strip()
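# Added illustration (hedged): splitting a two-part path. The path data below
# is a made-up example, not taken from any subject's overlay file.
# >>> list(_split_multipath("M0,0 L1,1 m2,2 l1,0"))
# ['M0,0 L1,1', 'm2,2 l1,0']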
def scrub(svgfile, overlays_available=None):
"""Remove data layers from an svg object prior to rendering
Returns etree-parsed svg object
"""
svg = etree.parse(svgfile, parser=parser)
try:
layers_to_remove = ['data']
if overlays_available is not None:
overlays_to_remove = [x for x in _find_layer_names(svg) if x not in overlays_available]
layers_to_remove = overlays_to_remove
for layer in layers_to_remove:
rmnode = _find_layer(svg, layer)
rmnode.getparent().remove(rmnode)
except ValueError:
# Seems sketch - should catch this?
pass
svgtag = svg.getroot()
svgtag.attrib['id'] = "svgoverlay"
inkver = "{%s}version"%inkns
if inkver in svgtag.attrib:
del svgtag.attrib[inkver]
try:
for tagname in ["{%s}namedview"%sodins, "{%s}metadata"%svgns]:
for tag in svg.findall(".//%s"%tagname):
tag.getparent().remove(tag)
except:
import traceback
traceback.print_exc()
return svg
def make_svg(pts, polys):
from .polyutils import trace_poly, boundary_edges
pts = pts.copy()
pts -= pts.min(0)
pts *= 1024 / pts.max(0)[1]
pts[:,1] = 1024 - pts[:,1]
path = ""
left, right = trace_poly(boundary_edges(polys))
for poly in [left, right]:
path +="M%f %f L"%tuple(pts[poly.pop(0), :2])
path += ', '.join(['%f %f'%tuple(pts[p, :2]) for p in poly])
path += 'Z '
w, h = pts.max(0)[:2]
with open(os.path.join(cwd, "svgbase.xml")) as fp:
svg = fp.read().format(width=w, height=h, clip=path)
return svg
def get_overlay(subject, svgfile, pts, polys, remove_medial=False,
overlays_available=None, modify_svg_file=True, **kwargs):
"""Return a python represent of the overlays present in `svgfile`.
Parameters
----------
subject: str
Name of the subject.
svgfile: str
File name with the overlays (.svg).
pts: array of shape (n_vertices, 3)
Coordinates of all vertices, as returned by for example by
cortex.db.get_surf.
polys: arrays of shape (n_polys, 3)
Indices of the vertices of all polygons, as returned for example by
cortex.db.get_surf.
remove_medial: bool
Whether to remove duplicate vertices. If True, the function also
returns an array with the unique vertices.
overlays_available: tuple or None
Overlays to keep in the result. If None, then all overlay layers of
the SVG file will be available in the result. If None, also add 3 empty
layers named 'sulci', 'cutouts', and 'display' (if not already
present).
modify_svg_file: bool
Whether to modify the SVG file when overlays_available=None, which can
add layers 'sulci', 'cutouts', and 'display' (if not already present).
If False, the SVG file will not be modified.
**kwargs
Other keyword parameters are given to the SVGOverlay constructor.
Returns
-------
svg : SVGOverlay instance.
Object with the overlays.
valid : array of shape (n_vertices, )
Indices of all vertices (without duplicates).
Only returned if remove_medial is True.
"""
cullpts = pts[:,:2]
if remove_medial:
valid = np.unique(polys)
cullpts = cullpts[valid]
if not os.path.exists(svgfile):
# Overlay file does not exist yet! We need to create and populate it
# I think this should be an entirely separate function, and it should
# be made clear when this file is created - opening a git issue on
# this soon...ML
print("Create new file: %s" % (svgfile, ))
with open(svgfile, "wb") as fp:
fp.write(make_svg(pts.copy(), polys).encode())
svg = SVGOverlay(svgfile, coords=cullpts, **kwargs)
## Add default layers
from .database import db
import io
from . import quickflat
import binascii
# Curvature
for layer_name, cmap in zip(['curvature', 'sulcaldepth', 'thickness'], ['gray', 'RdBu_r', 'viridis']):
try:
curv = db.get_surfinfo(subject, layer_name)
except:
print("Failed to import svg layer for %s, continuing"%layer_name)
continue
curv.cmap = cmap
vmax = np.abs(curv.data).max()
curv.vmin = -vmax
curv.vmax = vmax
fp = io.BytesIO()
quickflat.make_png(fp, curv, height=1024, with_rois=False, with_labels=False)
fp.seek(0)
svg.rois.add_shape(layer_name, binascii.b2a_base64(fp.read()).decode('utf-8'), False)
else:
if not modify_svg_file:
# To avoid modifying the svg file, we copy it in a temporary file
import shutil
svg_tmp = tempfile.NamedTemporaryFile(suffix=".svg")
svgfile_tmp = svg_tmp.name
shutil.copy2(svgfile, svgfile_tmp)
svgfile = svgfile_tmp
svg = SVGOverlay(svgfile,
coords=cullpts,
overlays_available=overlays_available,
**kwargs)
if overlays_available is None:
# Assure all layers are present
# (only if some set of overlays is not specified)
# NOTE: this actually modifies the svg file.
            # Use modify_svg_file=False to avoid modifying the svg file.
for layer in ['sulci', 'cutouts', 'display']:
if layer not in svg.layers:
svg.add_layer(layer)
if remove_medial:
return svg, valid
return svg
## From svg.path (https://github.com/regebro/svg.path/blob/master/src/svg/path/parser.py)
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def _parse_svg_pts(datastr):
data = list(_tokenize_path(datastr))
#data = data.replace(",", " ").split()
if data.pop(0).lower() != "m":
raise ValueError("Unknown path format")
#offset = np.array([float(x) for x in data[1].split(',')])
offset = np.array([float(x) for x in [data.pop(0), data.pop(0)]])
mode = "l"
pts = [[offset[0], offset[1]]]
def canfloat(n):
try:
float(n)
return True
except ValueError:
return False
lastlen = len(data)
while len(data) > 0:
#print mode, data
if not canfloat(data[0]):
mode = data.pop(0)
continue
if mode == "l":
offset += list([float(x) for x in [data.pop(0), data.pop(0)]])
elif mode == "L":
offset = np.array(list([float(x) for x in [data.pop(0), data.pop(0)]]))
elif mode == "h":
offset += list([float(x) for x in [data.pop(0), 0]])
elif mode == 'H':
offset = np.array(list([float(x) for x in [data.pop(0), 0]]))
elif mode == "v":
offset += list([float(x) for x in [0, data.pop(0)]])
elif mode == "V":
offset = np.array(list([float(x) for x in [0, data.pop(0)]]))
elif mode == "c":
data = data[4:]
offset += list([float(x) for x in [data.pop(0), data.pop(0)]])
elif mode == "C":
data = data[4:]
offset = np.array(list([float(x) for x in [data.pop(0), data.pop(0)]]))
#support multi-part paths, by only using one label for the whole path
elif mode == 'm' :
offset += list([float(x) for x in [data.pop(0), data.pop(0)]])
elif mode == 'M' :
offset = list([float(x) for x in [data.pop(0), data.pop(0)]])
## Check to see if nothing has happened, and, if so, fail
if len(data) == lastlen:
raise ValueError("Error parsing path.")
else:
lastlen = len(data)
pts.append([offset[0],offset[1]])
return np.array(pts)
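# Added illustration (hedged): relative commands accumulate into the pen
# position, so the path below flattens to absolute coordinates. The path
# string is a made-up example.
# >>> _parse_svg_pts("m 1,1 l 2,0 0,2")
# array([[ 1.,  1.],
#        [ 3.,  1.],
#        [ 3.,  3.]])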
def import_roi(roifile, outfile):
"""Convert rois.svg file (from previous versions of pycortex) to overlays.svg"""
import warnings
warnings.warn("Converting rois.svg to overlays.svg")
svg = etree.parse(roifile, parser=parser)
label_layer = None
for layer in svg.findall("{%s}g[@{%s}label]"%(svgns, inkns)):
name = layer.get("{%s}label"%inkns)
if name == "data":
#maintain data layer, do not process
pass
elif name == "roilabels": #label layer
label_layer = layer
layer.getparent().remove(layer)
else:
parent = _make_layer(layer.getparent(), name)
layer.getparent().remove(layer)
layer.attrib['id'] = '%s_shapes'%name
layer.attrib['{%s}label'%inkns] = 'shapes'
layer.attrib['clip-path'] = "url(#edgeclip)"
parent.append(layer)
labels = _make_layer(parent, "labels")
labels.attrib['id'] = '%s_labels'%name
if label_layer is not None:
rois = _find_layer(svg, "rois")
labels = _find_layer(rois, 'labels')
rois.remove(labels)
label_layer.attrib['id'] = 'rois_labels'
label_layer.attrib['{%s}label'%inkns] = 'labels'
rois.append(label_layer)
with open(outfile, "wb") as fp:
fp.write(etree.tostring(svg, pretty_print=True))
# Final check for all layers
svgo = SVGOverlay(outfile)
for new_layer in ['sulci', 'cutouts', 'display']:
if new_layer not in svgo.layers:
svgo.add_layer(new_layer)
def gen_path(path):
mdict = dict(m=Path.MOVETO, l=Path.LINETO, h=Path.LINETO, v=Path.LINETO)
verts, codes = [], []
mode, pen = None, np.array([0.,0.])
it = iter(path.get('d').split(' '))
run = True
while run:
try:
cmd = next(it)
if len(cmd) == 1:
mode = cmd
if cmd.lower() == 'z':
verts.append([0,0])
codes.append(Path.CLOSEPOLY)
elif mode.lower() == 'c':
p1 = [float(ss) for ss in cmd.split(',')]
p2 = [float(ss) for ss in next(it).split(',')]
p3 = [float(ss) for ss in next(it).split(',')]
if mode == 'c':
verts.append(pen + p1)
verts.append(pen + p2)
verts.append(pen + p3)
pen += p3
else:
verts.append(p1)
verts.append(p2)
verts.append(p3)
pen = np.array(p3)
codes.append(Path.CURVE4)
codes.append(Path.CURVE4)
codes.append(Path.CURVE4)
else:
if mode.lower() == 'h':
val = [float(cmd), 0]
elif mode.lower() == 'v':
val = [0, float(cmd)]
else:
val = [float(cc) for cc in cmd.split(',')]
codes.append(mdict[mode.lower()])
if mode.lower() == mode:
pen += val
verts.append(pen.tolist())
else:
pen = np.array(val)
verts.append(val)
if mode == 'm':
mode = 'l'
elif mode == 'M':
mode = 'L'
except StopIteration:
run = False
return Path(verts, codes=codes)
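

def _example_gen_path():
    """Hedged usage sketch (added; not part of pycortex).

    Builds a synthetic <path> element and converts it with gen_path(); the
    resulting matplotlib Path is what Shape.get_mask uses for point-in-region
    tests. The path data is a made-up square.
    """
    elem = etree.fromstring('<path xmlns="%s" d="m 0,0 l 10,0 0,10 z"/>' % svgns)
    return gen_path(elem)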
| bsd-2-clause |
brianr747/sfc_gui | sfc_gui/utils.py | 1 | 8532 | # coding=utf-8
"""
License/Disclaimer
------------------
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import sys
import traceback
import matplotlib
matplotlib.use('TKagg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot
from sfc_models.models import Model
if sys.version_info[0] < 3:
import Tkinter as tk
from Tkinter import *
from Tkinter import messagebox
from Tkinter import ttk
else:
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
class Parameters(object):
"""
Class to hold common data members that are shared by multiple frame objects.
Not all Frames will use all data.
"""
def __init__(self):
self.Model = Model()
self.ModelName = ''
self.TimeSeriesHolder = self.Model.EquationSolver.TimeSeries
self.TimeAxisVariable = 'k'
self.MinWidth = 800
self.MinHeight = 600
self.LogDir = ''
self.SourceOptions = ('Time Series', 'Initial Steady State', 'Convergence Trace')
self.LastSource = ''
self.TimeSeriesWidget = None
self.TimeAxisMinimum = None
self.TimeStart = None
self.TimeRange = None
def SetModel(self, model):
self.Model = model
self.LastSource = ''
self.SetTimeSeriesHolder()
def SetTimeSeriesHolder(self, source_str='Time Series'):
opt = source_str
if opt not in self.SourceOptions:
raise ValueError('Unknown time series source: ' + opt)
if opt == self.LastSource:
return
if opt == self.SourceOptions[0]:
holder = self.Model.EquationSolver.TimeSeries
if opt == self.SourceOptions[1]:
holder = self.Model.EquationSolver.TimeSeriesInitialSteadyState
if opt == self.SourceOptions[2]:
holder = self.Model.EquationSolver.TimeSeriesStepTrace
self.TimeSeriesHolder = holder
self.TimeAxisVariable = self.TimeSeriesHolder.TimeSeriesName
if self.TimeAxisVariable not in holder:
holder[self.TimeAxisVariable] = [0.0, 1.0]
self.TimeAxisMinimum = int(self.GetTimeSeries(self.TimeAxisVariable)[0])
self.TimeRange = 40 # None
self.TimeStart = self.TimeAxisMinimum
self.TimeSeriesList = holder.GetSeriesList()
if self.TimeSeriesWidget is not None:
self.TimeSeriesWidget.set(self.TimeSeriesList)
self.LastSource = opt
return holder
def GetTimeSeries(self, series_name):
ser = self.TimeSeriesHolder[series_name]
return ser
class WidgetHolder(object):
def __init__(self):
self.Widgets = {}
self.Data = {}
self.ListBoxType = {}
self.MatplotlibInfo = {}
def AddEntry(self, parent, name, readonly=False):
self.Data[name] = StringVar()
if readonly:
self.Widgets[name] = Entry(parent, state=['readonly',], textvariable=self.Data[name])
else:
self.Widgets[name] = Entry(parent, textvariable=self.Data[name])
def AddButton(self, parent, name, text, command, state='!disabled'):
self.Widgets[name] = ttk.Button(parent, text=text, command=command, state=state)
def AddTree(self, parent, name, columns):
self.Widgets[name] = ttk.Treeview(parent, columns=columns)
def AddListBox(self, parent, name, height=10, single_select=True, callback=None):
if single_select:
select_mode = 'browse'
else:
select_mode='extended'
self.ListBoxType[name] = select_mode
self.Data[name] = StringVar()
self.Widgets[name] = Listbox(parent, listvariable=self.Data[name], height=height,
selectmode=select_mode)
if callback is not None:
self.Widgets[name].bind('<<ListboxSelect>>', callback)
def GetListBox(self, name):
"""
If single_select: returns string or None (no selection).
If multi-select, always returns a list of strings (possibly empty).
:param name:
:return:
"""
indices = self.Widgets[name].curselection()
mlist = self.Data[name].get()
mlist = eval(mlist)
if self.ListBoxType[name] == 'browse':
if len(indices) == 0:
return None
else:
return mlist[indices[0]]
else:
return [mlist[x[0]] for x in indices]
def SetListBox(self, name, value):
if type(value) == str:
if value == '':
value = []
else:
value = [value,]
used = tuple(value)
# used = value
# if len(value) == 0:
# used = ''
# elif len(value) == 1:
# used = value[0]
self.Data[name].set(used)
def DeleteTreeChildren(self, name, item_code):
treewidget = self.Widgets[name]
children = treewidget.get_children(item_code)
for child in children:
treewidget.delete(child)
def AddMatplotLib(self, parent, name):
Fig = matplotlib.figure.Figure(figsize=(7.5, 5), dpi=90)
subplot = Fig.add_subplot(111)
x = []
y = []
self.MatplotlibInfo[name+"line"], = subplot.plot(x, y, 'bo-')
self.MatplotlibInfo[name+'canvas'] = FigureCanvasTkAgg(Fig, master=parent)
def AddRadioButtons(self, parent, name, options):
self.Data[name] = StringVar()
widgies = []
for opt in options:
widgies.append(ttk.Radiobutton(parent, text=opt, variable=self.Data[name], value=opt))
self.Widgets[name] = widgies
def AddVariableLabel(self, parent, name):
self.Data[name] = StringVar()
self.Widgets[name] = tk.Label(parent, textvariable=self.Data[name])
def GetMatplotlibInfo(self, name, objectname):
if not objectname in ('line', 'canvas'):
raise ValueError('Unknown type of object')
return self.MatplotlibInfo[name+objectname]
def sort_series(serlist):
"""
Sort a list of series names alphabetically, except for 'k' and 't' (at the front).
Works on a copy, and returns it. (Not an in-place sort.)
This should be moved to sfc_models, since the same code appears there.
:param serlist: list
:return:
"""
new_serlist = copy.copy(serlist)
new_serlist.sort()
if 't' in new_serlist:
new_serlist.remove('t')
new_serlist.insert(0, 't')
if 'k' in new_serlist:
new_serlist.remove('k')
new_serlist.insert(0,'k')
return new_serlist
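# Added illustration (hedged): the time axes are always moved to the front.
# >>> sort_series(['x', 'GDP', 't', 'k'])
# ['k', 't', 'GDP', 'x']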
def get_int(val, accept_None=True):
try:
val_n = int(val)
except:
if accept_None and val.lower() in ('none', 'na', ''):
val_n = None
else:
raise
return val_n
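# Added illustration (hedged): "none"-style strings are tolerated when
# accept_None is True; anything else re-raises the conversion error.
# >>> get_int('40')
# 40
# >>> get_int('None') is None
# True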
def get_series_info(series_name, mod):
desc = ''
eqn = ''
try:
eq = mod.FinalEquationBlock[series_name]
eqn = eq.GetRightHandSide()
desc = eq.Description
eqn_str = '{0} = {1}'.format(series_name, eqn)
except KeyError:
# k is one variable that will not be in the FinalEquationBlock
eqn_str = ''
if series_name == 'k':
desc = '[k] Time Axis'
eqn_str = 'k = k (!)'
if eqn_str == '' and series_name == 't':
eqn_str = 't = k'
desc = '[t] Automatically generated time axis; user may override as a global equation.'
if eqn_str == '' and series_name == 'iteration':
desc = 'The iteration step within the solver algorithm'
if eqn_str == '' and series_name == 'iteration_error':
desc = 'Fitting error for equations at each iteration of the solver.'
return eqn_str, desc
def ErrorDialog(ex):
msg = "Error: {0}\n\n{1}".format(str(ex), ''.join(traceback.format_exc(limit=4)))
messagebox.showinfo(message=msg, icon='error', title='Error')
| apache-2.0 |
kostajaitachi/shogun | examples/undocumented/python_modular/graphical/interactive_clustering_demo.py | 16 | 11310 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib import mpl
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
from modshogun import *
from modshogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'o', color='0.7')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'o', color='0.5')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
if event.button==1:
label = 1.0
else:
label = -1.0
self.data.add_example(event.xdata, event.ydata, label)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
self.k.setEnabled(True)
def train_svm(self):
k = int(self.k.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ko')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'ko')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = BinaryLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
distance_name = self.distance_combo.currentText()
if distance_name == "EuclideanDistance":
distance=EuclideanDistance(train, train)
elif distance_name == "ManhattanMetric":
distance=ManhattanMetric(train, train)
elif distance_name == "JensenMetric":
distance=JensenMetric(train, train)
kmeans=KMeans(k, distance)
kmeans.train()
centers = kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
self.axes.plot(features[0,labels==+1], features[1,labels==+1],'ro')
self.axes.plot(features[0,labels==-1], features[1,labels==-1],'bo')
for i in xrange(k):
self.axes.plot(centers[0,i],centers[1,i],'kx', markersize=20, linewidth=5)
t = numpy.linspace(0, 2*numpy.pi, 100)
self.axes.plot(radi[i]*numpy.cos(t)+centers[0,i],radi[i]*numpy.sin(t)+centers[1,i],'k-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
k_label = QLabel('Number of Clusters')
self.k = QLineEdit()
self.k.setText("2")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(k_label)
spins_hbox.addWidget(self.k)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Cluster!")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.distance_combo = QComboBox()
self.distance_combo.insertItem(-1, "EuclideanDistance")
self.distance_combo.insertItem(-1, "ManhattanMetric")
self.distance_combo.insertItem(-1, "JensenMetric")
self.distance_combo.maximumSize = QSize(300, 50)
self.connect(self.distance_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.distance_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1_pos = []
self.x2_pos = []
self.x1_neg = []
self.x2_neg = []
def get_stats(self):
num_neg = len(self.x1_neg)
num_pos = len(self.x1_pos)
str_neg = "num negative examples: %i" % num_neg
str_pos = "num positive examples: %i" % num_pos
return (str_neg, str_pos)
def get_labels(self):
return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
def get_examples(self):
num_pos = len(self.x1_pos)
num_neg = len(self.x1_neg)
examples = numpy.zeros((2,num_pos+num_neg))
for i in xrange(num_pos):
examples[0,i] = self.x1_pos[i]
examples[1,i] = self.x2_pos[i]
for i in xrange(num_neg):
examples[0,i+num_pos] = self.x1_neg[i]
examples[1,i+num_pos] = self.x2_neg[i]
return examples
def add_example(self, x1, x2, label):
if label==1:
self.x1_pos.append(x1)
self.x2_pos.append(x2)
else:
self.x1_neg.append(x1)
self.x2_neg.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
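#~ Hedged sketch (added for illustration): the clustering path used in
#~ Form.train_svm() above, outside the GUI. The random 2-D points stand in
#~ for clicked examples and are an assumption, not project data.
#~ feats = RealFeatures(numpy.random.randn(2, 100))
#~ kmeans = KMeans(2, EuclideanDistance(feats, feats))
#~ kmeans.train()
#~ centers = kmeans.get_cluster_centers()
#~ radii = kmeans.get_radiuses()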
| gpl-3.0 |
thilbern/scikit-learn | sklearn/utils/tests/test_random.py | 38 | 7410 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
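# Added illustration (hedged): the helper above exercises
# sklearn.utils.random.sample_without_replacement, which draws unique indices.
# >>> s = sample_without_replacement(100, 10, random_state=0)
# >>> len(s) == len(np.unique(s)) == 10
# True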
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
arabenjamin/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
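# Added note (hedged): the per-point loop above can also be written as a single
# vectorized call, which avoids the Python-level double loop:
#   Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)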
| bsd-3-clause |
feiyanzhandui/tware | refind/executor.py | 2 | 13123 | #!/usr/bin/python
import json
import numpy as np
import pandas as pd
import time
from multiprocessing import Process
from itertools import combinations
from sklearn.cross_validation import train_test_split
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import SelectPercentile
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.utils import compute_class_weight
from task import ClassifyTask
from task import CorrelateTask
from task import DifferenceTask
from task import FeatureSelectTask
from task import FrequentItemsetsTask
from task import IntersectTask
from task import LoadTask
from task import MergeTask
from task import ProjectTask
from task import SelectTask
from task import UnionTask
from result import ClassifyResult
from result import ErrorResult
from result import TransformResult
from util import classify_stats
from util import get_classifier
from util import Timer
TILE_SIZE = 1000
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report
class Executor(Process):
def __init__(self, catalog, results, task):
Process.__init__(self)
self.catalog = catalog
self.results = results
self.task = task
self.timer = Timer()
def get_result(self, uuid):
result = self.results[uuid]
while result.complete == 0.0:
time.sleep(0.0005)
result = self.results[uuid]
return result
def wait(self, uuid):
while self.results[uuid].complete == 0.0:
time.sleep(0.0005)
def run(self):
self.timer.start()
try:
if isinstance(self.task, ClassifyTask):
self.classify()
elif isinstance(self.task, CorrelateTask):
self.correlate()
elif isinstance(self.task, DifferenceTask):
self.difference()
elif isinstance(self.task, FeatureSelectTask):
self.feature_select()
elif isinstance(self.task, FrequentItemsetsTask):
self.frequent_itemsets()
elif isinstance(self.task, IntersectTask):
self.intersect()
elif isinstance(self.task, LoadTask):
self.load()
elif isinstance(self.task, MergeTask):
self.merge()
elif isinstance(self.task, ProjectTask):
self.project()
elif isinstance(self.task, SelectTask):
self.select()
elif isinstance(self.task, UnionTask):
self.union()
else:
raise NotImplementedError()
except Exception as e:
print str(e)
result = ErrorResult(self.task, str(e))
self.results[self.task.uuid] = result
self.timer.stop()
print 'task' + str(self.task.uuid) + ': ' + str(self.timer.time()) + 's'
def classify(self):
raise NotImplementedError()
def correlate(self):
raise NotImplementedError()
def difference(self):
raise NotImplementedError()
def feature_select(self):
raise NotImplementedError()
def frequent_itemsets(self):
raise NotImplementedError()
def intersect(self):
raise NotImplementedError()
def load(self):
raise NotImplementedError()
def merge(self):
raise NotImplementedError()
def project(self):
raise NotImplementedError()
def select(self):
raise NotImplementedError()
def union(self):
raise NotImplementedError()
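# Editorial note (not in the original source): Executor.run() acts as a template
# method: it times the task, dispatches on the concrete Task type and stores an
# ErrorResult on failure, while subclasses such as BasicExecutor below provide
# the per-operation implementations.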
class BasicExecutor(Executor):
def __init__(self, catalog, results, task):
Executor.__init__(self, catalog, results, task)
def classify(self):
y_data = self.get_result(self.task.label)
X_data = self.get_result(self.task.features)
y = np.array(y_data.data).ravel()
X = np.array(pd.get_dummies(X_data.data))
#X = MinMaxScaler().fit_transform(X)
X_train = X[:-TILE_SIZE]
y_train = y[:-TILE_SIZE]
X_test = X[-TILE_SIZE:]
y_test = y[-TILE_SIZE:]
cw = compute_class_weight('auto', np.array([0,1]), y)
cw = {0:cw[0],1:cw[1]}
b = get_classifier(self.task.classifier, cw)
b.partial_fit(X_train, y_train, classes=np.array([0,1]))
y_prob = None
y_pred = None
if self.task.classifier in ['perceptron','svm']:
y_pred = b.predict(X_test)
y_prob = np.array([[0,y] for y in y_pred])
else:
y_prob = b.predict_proba(X_test)
            y_pred = [1 if t[1] >= 0.5 else 0 for t in y_prob]  # column 1 holds P(class 1)
cm = confusion_matrix(y_test, y_pred)
stats = classify_stats(cm, y_test, y_prob, TILE_SIZE)
result = ClassifyResult(self.task, 1.0, b, stats)
self.results[self.task.uuid] = result
def correlate(self):
source = self.results[self.task.source]
#wait(source)
result = TransformResult(self.task, 1.0, source.data.corr())
self.results[self.task.uuid] = result
def difference(self):
left = self.results[self.task.left]
#wait(left)
ldata = left.data
right = self.results[self.task.right]
#wait(right)
rdata = right.data
data = ldata[~ldata.isin(rdata)]
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def feature_select(self):
        b = SelectPercentile(f_classif, percentile=self.task.percentile)
y = np.array(self.results[self.task.label].data)
X = np.array(self.results[self.task.features].data)
data = pd.DataFrame(b.fit_transform(X, y))
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def frequent_itemsets(self):
source = self.get_result(self.task.source)
data = source.data
size = float(len(data.index))
print 'size',size
itemsets = []
C = set(data.columns)
k = 1
while len(C) > k:
C_next = set()
for c in combinations(C, k):
support = pd.DataFrame(data[list(c)] != 0).product(1).sum(0) / size
if support > self.task.support:
itemsets.append((str(c),support))
for x in c:
C_next.add(x)
C = C_next
k += 1
itemsets = pd.DataFrame(itemsets, columns=['items','support'])
print 'len:', len(itemsets.index)
print 'final:', itemsets
result = TransformResult(self.task, 1.0, itemsets)
self.results[self.task.uuid] = result
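    # Editorial note on frequent_itemsets above (not in the original source):
    # the while loop is a level-wise, Apriori-style search. At level k every
    # k-item combination of the surviving columns is scored; support is the
    # fraction of rows in which all k columns are non-zero, and only items that
    # appear in some frequent k-itemset survive to level k+1.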
def intersect(self):
sources = [self.results[s] for s in self.task.sources]
#wait(sources[0])
data = sources[0].data
for s in sources[1:]:
#wait(s)
data = data[data.isin(s.data)].dropna(how='all')
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def load(self):
name = self.task.name
schema_file = self.task.schema_file
data_file = self.task.data_file
data = pd.read_csv(data_file)
schema = None
if schema_file is None:
attrs = data.columns
dtypes = data.dtypes
schema = [(attr,str(dtypes[attr])) for attr in attrs]
else:
with open(self.task.schema_file) as f:
schema = json.load(f)
self.catalog[name] = {'uuid': self.task.uuid, 'schema': schema}
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def merge(self):
sources = pd.DataFrame(self.get_result(self.task.sources[0]).y_pred)
for source in self.task.sources[1:]:
s = pd.DataFrame(self.get_result(source).y_pred)
sources = pd.merge(sources, s, left_index=True, right_index=True, how='outer')
result = TransformResult(self.task, 1.0, sources)
self.results[self.task.uuid] = result
def project(self):
self.wait(self.task.source)
source = self.results[self.task.source]
data = pd.DataFrame(source.data[self.task.attributes])
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def select(self):
pred = self.task.predicate
pred = pred.replace(' and ', ' & ')
pred = pred.replace(' or ', ' | ')
pred = pred.replace(' not ', ' ~')
source = self.results[self.task.source]
#wait(source)
data = source.data.query(pred)
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
def union(self):
data = pd.concat([self.results[s].data
for s in self.task.sources]).drop_duplicates()
result = TransformResult(self.task, 1.0, data)
self.results[self.task.uuid] = result
class IncrementalExecutor(Executor):
def __init__(self, catalog, results, task):
Executor.__init__(self, catalog, results, task)
pos = 0
def classify(self):
y_data = self.results[self.task.label].data
X_data = self.results[self.task.features].data
y = np.array(y_data)
X = np.array(X_data)
cw = compute_class_weight('auto', np.array([0,1]), y)
cw = {0:cw[0],1:cw[1]}
print cw
b = get_classifier(self.task.classifier, cw)
tile_size = 1000
num_tiles = y.size / tile_size
for i in range(num_tiles):
pos = i * tile_size
X_sub = X[pos : pos + tile_size]
y_sub = y[pos : pos + tile_size]
y_prob = None
y_pred = None
if self.task.classifier == 'svm':
y_pred = b.predict(X_sub)
y_prob = np.array([[0,y] for y in y_pred])
else:
y_prob = b.predict_proba(X_sub)
y_pred = [1 if y[1] >= 0.5 else 0 for y in y_prob]
cm = confusion_matrix(y_sub, y_pred)
            stats = classify_stats(cm, y_sub, y_prob)
y_pred = pd.DataFrame(y_pred, columns=y_data.columns)
result = ClassifyResult(self.task, 1.0, b, stats)
self.results[self.task.uuid] = result
b.partial_fit(X_sub, y_sub)
def correlate(self):
raise NotImplementedError()
def difference(self):
raise NotImplementedError()
def feature_select(self):
raise NotImplementedError()
def intersect(self):
raise NotImplementedError()
def load(self):
schema_file = self.task.schema_file
data_file = self.task.data_file
data = pd.read_csv(data_file, nrows=0)
schema = None
if schema_file is None:
attrs = data.columns
dtypes = data.dtypes
schema = [(attr,str(dtypes[attr])) for attr in attrs]
else:
with open(self.task.schema_file) as f:
schema = json.load(f)
self.catalog[data_file] = {'uuid': self.task.uuid, 'schema': schema}
for chunk in pd.read_csv(data_file, chunksize=1000):
            data = data.append(chunk, ignore_index=True)  # DataFrame.append returns a new frame
result = TransformResult(self.task, 1.0, data) #actual size
self.results[self.task.uuid] = result
def project(self):
raise NotImplementedError()
def select(self):
raise NotImplementedError()
def union(self):
raise NotImplementedError()
class SparkExecutor(Executor):
def __init__(self, catalog, results, task):
Executor.__init__(self, catalog, results, task)
def classify(self):
raise NotImplementedError()
def correlate(self):
raise NotImplementedError()
def difference(self):
raise NotImplementedError()
def feature_select(self):
raise NotImplementedError()
def intersect(self):
raise NotImplementedError()
def load(self):
raise NotImplementedError()
def merge(self):
raise NotImplementedError()
def project(self):
raise NotImplementedError()
def select(self):
raise NotImplementedError()
def union(self):
raise NotImplementedError()
class TuplewareExecutor(Executor):
def __init__(self, catalog, results, task):
Executor.__init__(self, catalog, results, task)
def classify(self):
raise NotImplementedError()
def correlate(self):
raise NotImplementedError()
def difference(self):
raise NotImplementedError()
def feature_select(self):
raise NotImplementedError()
def intersect(self):
raise NotImplementedError()
def load(self):
raise NotImplementedError()
def merge(self):
raise NotImplementedError()
def project(self):
raise NotImplementedError()
def select(self):
raise NotImplementedError()
def union(self):
raise NotImplementedError()
| apache-2.0 |
mvnnn/tardis | tardis/io/model_reader.py | 5 | 7791 | #reading different model files
import numpy as np
from numpy import recfromtxt, genfromtxt
import pandas as pd
from astropy import units as u
import logging
# Adding logging support
logger = logging.getLogger(__name__)
from tardis.util import parse_quantity
class ConfigurationError(Exception):
pass
def read_density_file(density_filename, density_filetype, time_explosion, v_inner_boundary=0.0, v_outer_boundary=np.inf):
"""
read different density file formats
Parameters
----------
density_filename: ~str
filename or path of the density file
density_filetype: ~str
type of the density file
time_explosion: ~astropy.units.Quantity
time since explosion used to scale the density
"""
file_parsers = {'artis': read_artis_density,
'simple_ascii': read_simple_ascii_density}
time_of_model, index, v_inner, v_outer, unscaled_mean_densities = file_parsers[density_filetype](density_filename)
mean_densities = calculate_density_after_time(unscaled_mean_densities, time_of_model, time_explosion)
if v_inner_boundary > v_outer_boundary:
raise ConfigurationError('v_inner_boundary > v_outer_boundary '
'({0:s} > {1:s}). unphysical!'.format(
v_inner_boundary, v_outer_boundary))
if (not np.isclose(v_inner_boundary, 0.0 * u.km / u.s,
atol=1e-8 * u.km / u.s)
and v_inner_boundary > v_inner[0]):
if v_inner_boundary > v_outer[-1]:
raise ConfigurationError('Inner boundary selected outside of model')
inner_boundary_index = v_inner.searchsorted(v_inner_boundary) - 1
else:
inner_boundary_index = None
v_inner_boundary = v_inner[0]
logger.warning("v_inner_boundary requested too small for readin file."
" Boundary shifted to match file.")
if not np.isinf(v_outer_boundary) and v_outer_boundary < v_outer[-1]:
outer_boundary_index = v_outer.searchsorted(v_outer_boundary) + 1
else:
outer_boundary_index = None
v_outer_boundary = v_outer[-1]
logger.warning("v_outer_boundary requested too large for readin file. Boundary shifted to match file.")
v_inner = v_inner[inner_boundary_index:outer_boundary_index]
v_inner[0] = v_inner_boundary
v_outer = v_outer[inner_boundary_index:outer_boundary_index]
v_outer[-1] = v_outer_boundary
mean_densities = mean_densities[inner_boundary_index:outer_boundary_index]
return v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index
def read_abundances_file(abundance_filename, abundance_filetype, inner_boundary_index=None, outer_boundary_index=None):
"""
    read different abundance file formats
Parameters
----------
abundance_filename: ~str
        filename or path of the abundance file
abundance_filetype: ~str
        type of the abundance file
inner_boundary_index: int
index of the inner shell, default None
outer_boundary_index: int
index of the outer shell, default None
"""
file_parsers = {'simple_ascii': read_simple_ascii_abundances,
'artis': read_simple_ascii_abundances}
index, abundances = file_parsers[abundance_filetype](abundance_filename)
if outer_boundary_index is not None:
outer_boundary_index_m1 = outer_boundary_index - 1
else:
outer_boundary_index_m1 = None
index = index[inner_boundary_index:outer_boundary_index]
abundances = abundances.ix[:, slice(inner_boundary_index, outer_boundary_index_m1)]
abundances.columns = np.arange(len(abundances.columns))
return index, abundances
def read_simple_ascii_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5 s
#index velocity [km/s] density [g/cm^3]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
time_of_model_string = fh.readline().strip()
time_of_model = parse_quantity(time_of_model_string)
data = recfromtxt(fname, skip_header=1, names=('index', 'velocity', 'density'), dtype=(int, float, float))
velocity = (data['velocity'] * u.km / u.s).to('cm/s')
v_inner, v_outer = velocity[:-1], velocity[1:]
mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]
return time_of_model, data['index'], v_inner, v_outer, mean_density
def read_artis_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5
#index velocity [km/s] log10(density) [log10(g/cm^3)]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
        for i, line in enumerate(fh):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = recfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')
mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')[1:]
v_inner, v_outer = velocity[:-1], velocity[1:]
return time_of_model, artis_model['index'], v_inner, v_outer, mean_density
def read_simple_ascii_abundances(fname):
"""
Reading an abundance file of the following structure (example; lines starting with hash will be ignored):
The first line of abundances describe the abundances in the center of the model and are not used.
#index element1, element2, ..., element30
0 0.4 0.3, .. 0.2
Parameters
----------
fname: str
filename or path with filename
Returns
-------
index: ~np.ndarray
containing the indices
abundances: ~pandas.DataFrame
data frame containing index, element1 - element30 and columns according to the shells
"""
data = np.loadtxt(fname)
index = data[1:,0].astype(int)
abundances = pd.DataFrame(data[1:,1:].transpose(), index=np.arange(1, data.shape[1]))
return index, abundances
def calculate_density_after_time(densities, time_0, time_explosion):
"""
scale the density from an initial time of the model to the time of the explosion by ^-3
Parameters:
-----------
densities: ~astropy.units.Quantity
densities
time_0: ~astropy.units.Quantity
time of the model
time_explosion: ~astropy.units.Quantity
time to be scaled to
Returns:
--------
scaled_density
"""
return densities * (time_explosion / time_0) ** -3
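if __name__ == '__main__':
    # Minimal usage sketch (editorial addition, not part of the original
    # module): homologous expansion scales densities by (t_exp / t_0)**-3,
    # so going from 2 to 10 days divides them by 125.
    example_densities = u.Quantity([1e-14, 5e-15], 'g/cm^3')
    scaled = calculate_density_after_time(example_densities,
                                          u.Quantity(2, 'day'),
                                          u.Quantity(10, 'day'))
    print(scaled)  # approximately [8e-17, 4e-17] g / cm3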
| bsd-3-clause |
surhudm/scipy | scipy/integrate/quadrature.py | 33 | 28087 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
Cache p_roots results to speed up calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_p_roots(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
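# Editorial note (not in the original source): with tmp = 4**k this is the
# classical Richardson extrapolation step of Romberg integration,
#     R[i, k] = (4**k * R[i, k-1] - R[i-1, k-1]) / (4**k - 1),
# where `b` is the entry from the previous row and `c` the entry from the
# current row, both of order k-1.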
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e. whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
    and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
    except TypeError:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
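if __name__ == '__main__':
    # Minimal usage sketch (editorial addition, not part of the original
    # module): integrate sin(x) over [0, pi]; the exact value is 2.0.
    val, _ = fixed_quad(np.sin, 0.0, np.pi, n=5)
    xs = np.linspace(0.0, np.pi, 129)          # 2**7 + 1 samples for romb
    print(val, romb(np.sin(xs), dx=xs[1] - xs[0]))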
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/metrics/cluster/supervised.py | 21 | 26876 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
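# Editorial note (a worked example, not in the original source): with
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1] the contingency
# matrix is [[1, 1], [0, 2]]: one sample of true class 0 landed in each
# cluster, while both samples of true class 1 landed in cluster 1.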
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
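# Editorial note (not in the original source): in the return expression above,
# sum_comb is the raw index sum_ij C(n_ij, 2), prod_comb is its expected value
# under independent labelings and mean_comb is its maximum, so the result is the
# classical ARI = (Index - E[Index]) / (Max[Index] - E[Index]).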
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
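# Editorial note (not in the original source): each summand above expands to
# (n_ij / N) * log((n_ij / N) / ((a_i / N) * (b_j / N))), i.e. the usual
# MI(U, V) = sum_ij P(i, j) * log(P(i, j) / (P(i) * P(j))), where a_i and b_j
# are the row and column sums of the contingency matrix and N its grand total.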
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
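# Added note (illustrative, not in the original module): as a quick sanity
# check of the helper above, entropy([0, 0, 1, 1]) evaluates to log(2)
# (about 0.6931), while a constant labeling such as entropy([1, 1, 1, 1])
# evaluates to 0.0.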
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
# of n_iter atleast 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
procoder317/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
Jai-Chaudhary/vislab | vislab/features/misc.py | 4 | 7427 | """
Copyright Sergey Karayev / Adobe - 2013.
Written during internship at Adobe CTL, San Francisco.
TODO:
- be robust to failures in computing some features in gbvs_saliency
and lab_hist
"""
import os
import numpy as np
from PIL import Image
import tempfile
import subprocess
import shlex
import scipy.io
import socket
import glob
import pandas as pd
import shutil
import vislab.image
def caffe(image_ids, image_filenames, layer='fc6', network='alexnet'):
import caffe
networks = {
'alexnet': {
'model_def_file': (
str(vislab.config['paths']['caffe'] +
'/examples/imagenet/imagenet_deploy.prototxt')
),
'pretrained_model': (
str(vislab.config['paths']['caffe'] +
'/examples/imagenet/caffe_reference_imagenet_model')
)
}
}
if network not in networks:
raise ValueError('Only networks supported: {}'.format(networks.keys()))
# Initialize the network (takes ~1 s)
net = caffe.Classifier(
networks[network]['model_def_file'],
networks[network]['pretrained_model'],
mean_file=vislab.config['paths']['caffe'] + '/python/caffe/imagenet/ilsvrc_2012_mean.npy',
channel_swap=(2, 1, 0), input_scale=255
)
# net = caffe.imagenet.ImageNetClassifier(**networks[network])
net.set_phase_test()
net.set_mode_cpu()
if layer not in net.blobs.keys():
raise ValueError('Only layers supported for this network: {}'.format(
net.blobs.keys()))
good_image_ids = []
feats = []
for image_id, image_filename in zip(image_ids, image_filenames):
try:
# First, run the network fully forward by calling predict.
# Then, for whatever blob we want, max across image crops.
net.predict([caffe.io.load_image(image_filename)])
feats.append(net.blobs[layer].data.max(0).flatten())
good_image_ids.append(image_id)
except:
continue
return good_image_ids, feats
def size(image_ids, image_filenames):
"""
Simply return the (h, w, area, aspect_ratio, has_color) of each image.
"""
good_image_ids = []
feats = []
for image_id, filename in zip(image_ids, image_filenames):
try:
image = vislab.dataset.get_image_for_filename(filename)
has_color = 1 if image.ndim > 2 else 0
h, w = image.shape[:2]
feat = np.array((h, w, h * w, float(h) / w, has_color))
good_image_ids.append(image_id)
feats.append(feat)
except:
continue
return good_image_ids, feats
def gist(image_ids, image_filenames, max_size=256):
import leargist
good_image_ids = []
feats = []
for image_id, filename in zip(image_ids, image_filenames):
try:
# TODO: resize image to a smaller size? like 128?
img = vislab.dataset.get_image_for_filename(filename)
assert(img.dtype == np.uint8)
if img.ndim == 2:
img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
h, w = img.shape[:2]
mode = 'RGBA'
rimg = img.reshape(img.shape[0] * img.shape[1], img.shape[2])
if len(rimg[0]) == 3:
rimg = np.c_[rimg, 255 * np.ones((len(rimg), 1), np.uint8)]
im = Image.frombuffer(
mode, (w, h), rimg.tostring(), 'raw', mode, 0, 1)
im.thumbnail((max_size, max_size), Image.ANTIALIAS)
feat = leargist.color_gist(im)
good_image_ids.append(image_id)
feats.append(feat)
except:
continue
    return good_image_ids, feats
def lab_hist(image_ids, image_filenames):
"""
Standard feature as described in [1].
A histogram in L*a*b* space, having 4, 14, and 14 bins in each dimension
respectively, for a total of 784 dimensions.
[1] Palermo, F., Hays, J., & Efros, A. A. (2012).
Dating Historical Color Images. In ECCV.
"""
f, output_filename = tempfile.mkstemp()
image_filenames_cell = '{' + ','.join(
"'{}'".format(x) for x in image_filenames) + '}'
matlab = "addpath('matlab/lab_histogram'); lab_hist({}, '{}')".format(
image_filenames_cell, output_filename)
matlab_cmd = "matlab -nojvm -r \"try; {}; catch; exit; end; exit\"".format(
matlab)
print(matlab_cmd)
pid = subprocess.Popen(
shlex.split(matlab_cmd), stdout=open('/dev/null', 'w'))
retcode = pid.wait()
if retcode != 0:
raise Exception("Matlab script did not exit successfully!")
# Read features
feats = [x for x in np.loadtxt(output_filename)]
os.remove(output_filename)
assert(len(feats) == len(image_ids))
return image_ids, feats
def gbvs_saliency(image_ids, image_filenames):
f, output_filename = tempfile.mkstemp()
output_filename += '.mat'
image_ids_cell = '{' + ','.join(
"'{}'".format(x) for x in image_filenames) + '}'
matlab_script = "get_maps({}, '{}')".format(
image_ids_cell, output_filename)
matlab_cmd = "matlab -nojvm -r \"try; {}; catch; exit; end; exit\"".format(
matlab_script)
print(matlab_cmd)
pid = subprocess.Popen(
shlex.split(matlab_cmd),
cwd=os.path.expanduser('~/work/vislab/matlab/gbvs'))
retcode = pid.wait()
if retcode != 0:
raise Exception("Matlab script did not exit successfully!")
# Read features
try:
maps = scipy.io.loadmat(output_filename)['maps']
feats = [x for x in maps]
os.remove(output_filename)
except Exception as e:
raise Exception('Exception {} occured on {}'.format(
e, socket.gethostname()))
print("Successfully computed {} features".format(len(feats)))
assert(len(feats) == len(image_ids))
return image_ids, feats
def mc_bit(image_ids, image_filenames):
"""
Compute the mc_bit feature provided by the vlg_extractor package,
which should be installed in ext/.
"""
input_dirname = os.path.dirname(image_filenames[0])
image_filenames = [
os.path.relpath(fname, input_dirname) for fname in image_filenames]
f, list_filename = tempfile.mkstemp()
with open(list_filename, 'w') as f:
f.write('\n'.join(image_filenames) + '\n')
output_dirname = tempfile.mkdtemp()
cmd = './vlg_extractor.sh'
cmd += ' --parameters-dir={} --extract_mc_bit=ASCII {} {} {}'.format(
'data/picodes_data', list_filename, input_dirname, output_dirname)
print(cmd)
try:
print("Starting {}".format(cmd))
p = subprocess.Popen(
shlex.split(cmd),
cwd=os.path.expanduser(vislab.config['paths']['vlg_extractor'])
)
p.wait()
except Exception as e:
print(e)
raise Exception("Something went wrong with running vlg_extractor")
image_ids = []
feats = []
for filename in glob.glob(output_dirname + '/*_mc_bit.ascii'):
id_ = os.path.basename(filename.replace('_mc_bit.ascii', ''))
image_ids.append(id_)
feats.append(pd.read_csv(filename).values.flatten().astype(bool))
if p.returncode != 0 or len(feats) == 0:
raise Exception("Something went wrong with running vlg 2")
os.remove(list_filename)
shutil.rmtree(output_dirname)
return image_ids, feats
| bsd-2-clause |
cbertinato/pandas | pandas/util/_validators.py | 1 | 13041 | """
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
        # try checking equality directly with '==' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except ValueError:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
    Parameters
    ----------
    fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
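# Illustrative call (added; the names are hypothetical): for a wrapper that
# only accepts its compatibility argument at the default,
#     validate_args('squeeze', (None,), 0, OrderedDict([('axis', None)]))
# passes silently, while validate_args('squeeze', (1,), 0, ...) raises
# ValueError because 1 differs from the default None.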
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`) OR
`kwargs` contains keys in `compat_args` that do not map to the default
value as specified in `compat_args`
See Also
--------
validate_args : Purely args validation.
validate_kwargs : Purely kwargs validation.
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame
args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO: Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
" a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
| bsd-3-clause |
xuewei4d/scikit-learn | benchmarks/bench_sparsify.py | 20 | 3370 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
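# Added note: despite its name, this reports the fraction of non-zero entries
# (i.e. the density); the "sparsity" figures printed below are therefore the
# share of entries that are non-zero.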
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, max_iter=2000,
tol=None)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as the weights of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
framefreeze/HangDriver | lane_detection/lanedetection2.py | 1 | 5523 | from moviepy.editor import VideoFileClip
import matplotlib.pyplot as plt
import matplotlib.image as mplimg
import numpy as np
import cv2
blur_ksize = 5 # Gaussian blur kernel size
canny_lthreshold = 70 # Canny edge detection low threshold
canny_hthreshold = 150 # Canny edge detection high threshold
# Hough transform parameters
rho = 1
theta = np.pi / 180
threshold = 15
min_line_length = 60
max_line_gap = 30
def roi_mask(img, vertices):
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
mask_color = (255,) * channel_count
else:
mask_color = 255
cv2.fillPoly(mask, vertices, mask_color)
masked_img = cv2.bitwise_and(img, mask)
return masked_img
def draw_roi(img, vertices):
cv2.polylines(img, vertices, True, [255, 0, 0], thickness=2)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# draw_lines(line_img, lines)
draw_lanes(line_img, lines)
return line_img
def draw_lanes(img, lines, color=[255, 0, 0], thickness=8):
left_lines, right_lines = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                # skip vertical segments to avoid division by zero
                continue
            k = (y2 - y1) / (x2 - x1)
            if k < 0:
                left_lines.append(line)
            else:
                right_lines.append(line)
if (len(left_lines) <= 0 or len(right_lines) <= 0):
return img
clean_lines(left_lines, 0.1)
clean_lines(right_lines, 0.1)
left_points = [(x1, y1) for line in left_lines for x1,y1,x2,y2 in line]
left_points = left_points + [(x2, y2) for line in left_lines for x1,y1,x2,y2 in line]
right_points = [(x1, y1) for line in right_lines for x1,y1,x2,y2 in line]
right_points = right_points + [(x2, y2) for line in right_lines for x1,y1,x2,y2 in line]
left_vtx = calc_lane_vertices(left_points, 450, img.shape[0])
right_vtx = calc_lane_vertices(right_points, 450, img.shape[0])
cv2.line(img, left_vtx[0], left_vtx[1], color, thickness)
cv2.line(img, right_vtx[0], right_vtx[1], color, thickness)
def clean_lines(lines, threshold):
slope = [(y2 - y1) / (x2 - x1) for line in lines for x1, y1, x2, y2 in line]
while len(lines) > 0:
mean = np.mean(slope)
diff = [abs(s - mean) for s in slope]
idx = np.argmax(diff)
if diff[idx] > threshold:
slope.pop(idx)
lines.pop(idx)
else:
break
def calc_lane_vertices(point_list, ymin, ymax):
x = [p[0] for p in point_list]
y = [p[1] for p in point_list]
fit = np.polyfit(y, x, 1)
fit_fn = np.poly1d(fit)
xmin = int(fit_fn(ymin))
xmax = int(fit_fn(ymax))
return [(xmin, ymin), (xmax, ymax)]
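# Added note (illustrative): calc_lane_vertices fits x as a linear function of
# y, e.g. the points (100, 400) and (200, 300) give x = 500 - y, so the lane
# would be drawn from (50, 450) to (20, 480) for ymin=450, ymax=480.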
def process_an_image(img):
roi_vtx = np.array([[(0+20, img.shape[0]-60), (600, 450), (720, 450), (img.shape[1]-150, img.shape[0]-60)]])
# extract yellow line and white line
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
'''
>>> yellow = np.uint8([[[255, 255, 0]]])
>>> hsv_yellow = cv2.cvtColor(yellow, cv2.COLOR_RGB2HSV)
>>> hsv_yellow
array([[[ 30, 255, 255]]], dtype=uint8)
Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively
'''
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([40, 255, 255])
yellow_mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
# http://stackoverflow.com/questions/22588146/tracking-white-color-using-python-opencv
lower_white = np.array([0, 0, 215])
upper_white = np.array([180, 40, 255])
white_mask = cv2.inRange(hsv, lower_white, upper_white)
color_mask = cv2.bitwise_or(yellow_mask, white_mask)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
darken = (gray / 3).astype(np.uint8)
color_masked = cv2.bitwise_or(darken, color_mask)
blur_gray = cv2.GaussianBlur(color_masked, (blur_ksize, blur_ksize), 0, 0)
edges = cv2.Canny(blur_gray, canny_lthreshold, canny_hthreshold)
roi_edges = roi_mask(edges, roi_vtx)
line_img = hough_lines(roi_edges, rho, theta, threshold, min_line_length, max_line_gap)
res_img = cv2.addWeighted(img, 0.8, line_img, 1, 0)
'''
plt.figure()
plt.imshow(img)
plt.savefig('images/lane_original.png', bbox_inches='tight')
plt.figure()
plt.imshow(gray, cmap='gray')
plt.savefig('images/gray.png', bbox_inches='tight')
plt.figure()
plt.imshow(blur_gray, cmap='gray')
plt.savefig('images/blur_gray.png', bbox_inches='tight')
plt.figure()
plt.imshow(edges, cmap='gray')
plt.savefig('images/edges.png', bbox_inches='tight')
plt.figure()
plt.imshow(roi_edges, cmap='gray')
plt.savefig('images/roi_edges.png', bbox_inches='tight')
plt.figure()
plt.imshow(line_img, cmap='gray')
plt.savefig('images/line_img.png', bbox_inches='tight')
plt.figure()
plt.imshow(res_img)
plt.savefig('images/res_img.png', bbox_inches='tight')
plt.show()
'''
return res_img
# img = mplimg.imread("lane.jpg")
# process_an_image(img)
output = 'video_5_sol.mp4'
# clip = VideoFileClip("/Users/yujifan/Downloads/video_2.mp4")
clip = VideoFileClip("/Users/yujifan/Downloads/data/IMG_0906.MOV")
out_clip = clip.fl_image(process_an_image)
out_clip.write_videofile(output, audio=False) | mit |
ryklith/pyltesim | plotting/plot_miss_rate_analysis_seqDTX.py | 1 | 2111 | #!/usr/bin/env python
''' Generate the sum rate comparison plot for ICC 2013.
x axis: iterations
y axis: average miss rate
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "[email protected]"
__status__ = "Development"
def plot(filename):
""" Open data file, process, generate pdf and png"""
import numpy as np
import matplotlib.pyplot as plt
from utils import utils
rate = 1
# data comes in a csv
data = np.genfromtxt(filename, delimiter=',')
# first row is x-axis (number of users in cell). Each user has a fixed rate.
x = data[0] # Mbps
fig = plt.figure()
ax1 = fig.add_subplot(111)
# second row is BA
ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10)
# ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10)
# ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10)
ax1.plot(x, data[4], '-b*', label='Random alignment', markersize=10)
# ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10)
# ax1.plot(x, data[5], '-yx', label='Random once', markersize=10)
ax1.plot(x, data[6], '-gD', label='p-persistent SINR ranking', markersize=10)
# ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10)
ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10)
# plt.axis( [8, 41, 100, 440])
plt.legend(loc='lower right', prop={'size':20})
plt.setp(ax1.get_xticklabels(), fontsize=20)
plt.setp(ax1.get_yticklabels(), fontsize=20)
xlabel = 'OFDMA frames'
ylabel = 'Average miss count'
title = 'Average number of cells where target rate was missed at ' + str(rate) + ' bps'
ax1.set_xlabel(xlabel, size=20)
ax1.set_ylabel(ylabel, size=20)
# plt.title(title)
plt.savefig(filename+'.pdf', format='pdf')
plt.savefig(filename+'.png', format='png')
if __name__ == '__main__':
import sys
filename = sys.argv[1]
plot(filename)
| gpl-2.0 |
waterponey/scikit-learn | examples/ensemble/plot_isolation_forest.py | 39 | 2361 | """
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
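# Added note: predict() returns +1 for regular observations and -1 for
# anomalies, while decision_function() used below yields a continuous score in
# which lower values indicate more anomalous samples.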
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
DCSaunders/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 30 | 2249 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
TurkuNLP/SRNNMT | train.py | 1 | 5141 | from keras.models import Sequential, Graph, Model
from keras.layers import Dense, Dropout, Activation, Merge, Input, merge, Flatten,ActivityRegularization
# from keras.layers.core import Masking
from keras.layers.recurrent import GRU
# from keras.optimizers import SGD
# from keras.datasets import reuters
from keras.callbacks import Callback,ModelCheckpoint
from keras.layers.embeddings import Embedding
# from sklearn.feature_extraction import DictVectorizer
# from sklearn.metrics import f1_score, classification_report
# import codecs
# import numpy as np
# import gzip
import sys
import math
# from svm_pronouns import iter_data
import json
# import copy
# from data_dense import *
# from sklearn.metrics import recall_score
import data_dense
class CustomCallback(Callback):
def __init__(self, dev_data,dev_labels,index2label,model_name):
pass
def on_epoch_end(self, epoch, logs={}):
pass
model_name="gru_model"
minibatch_size=400
max_sent_len=200
vec_size=75
gru_width=75
ngrams=(4,)
ms=data_dense.Matrices(minibatch_size,max_sent_len,ngrams)
#Read vocabularies
src_f_name="data/all.train.fi.tokenized"
trg_f_name="data/all.train.en.tokenized"
vs=data_dense.read_vocabularies(model_name+"-vocab.pickle",src_f_name,trg_f_name,False,ngrams)
vs.trainable=False
#Inputs: list of one Input per N-gram size
src_inp=[Input(shape=(max_sent_len,), name="source_ngrams_{}".format(N), dtype="int32") for N in ngrams]
trg_inp=[Input(shape=(max_sent_len,), name="target_ngrams_{}".format(N), dtype="int32") for N in ngrams]
# sent len
src_len_inp=Input(shape=(1,), name="src_len", dtype="int32")
trg_len_inp=Input(shape=(1,), name="trg_len", dtype="int32")
#Embeddings: list of one Embedding per input
src_emb=[Embedding(len(vs.source_ngrams[N]), vec_size, input_length=max_sent_len, mask_zero=True, name="source_embedding_{}".format(N)) for N in ngrams]
trg_emb=[Embedding(len(vs.target_ngrams[N]), vec_size, input_length=max_sent_len, mask_zero=True, name="target_embedding_{}".format(N)) for N in ngrams]
# sent len
flattener1=Flatten()
flattener2=Flatten()
src_len_emb=Embedding(31,vec_size, input_length=1, name="src_len_emb")
#src_flattener=Flatten()
src_len_vec=flattener1(src_len_emb(src_len_inp))
trg_len_emb=Embedding(31,vec_size, input_length=1, name="trg_len_emb")
#trg_flattener=Flatten()
trg_len_vec=flattener2(trg_len_emb(trg_len_inp))
#Vectors: list of one embedded vector per input-embedding pair
src_vec=[src_emb_n(src_inp_n) for src_inp_n,src_emb_n in zip(src_inp,src_emb)]
trg_vec=[trg_emb_n(trg_inp_n) for trg_inp_n,trg_emb_n in zip(trg_inp,trg_emb)]
#RNNs: list of one GRU per ngram size
# forward
src_gru=[GRU(gru_width,consume_less="gpu",dropout_W=0.3,activation="relu",return_sequences=False,name="source_GRU_{}".format(N)) for N in ngrams]
trg_gru=[GRU(gru_width,consume_less="gpu",dropout_W=0.3,activation="relu",return_sequences=False,name="target_GRU_{}".format(N)) for N in ngrams]
src_gru_out=[src_gru_n(src_vec_n) for src_vec_n,src_gru_n in zip(src_vec,src_gru)]
trg_gru_out=[trg_gru_n(trg_vec_n) for trg_vec_n,trg_gru_n in zip(trg_vec,trg_gru)]
#Catenate the GRUs
src_gru_all=merge(src_gru_out+[src_len_vec],mode='concat',concat_axis=1,name="src_gru_concat")
trg_gru_all=merge(trg_gru_out+[trg_len_vec],mode='concat',concat_axis=1,name="trg_gru_concat")
src_dense_lin_out=Dense(gru_width,name="source_dense")(src_gru_all)
trg_dense_lin_out=Dense(gru_width,name="target_dense")(trg_gru_all)
#..regularize
#src_dense_reg=ActivityRegularization(l2=1.0,name="source_dense")
#trg_dense_reg=ActivityRegularization(l2=1.0,name="target_dense")
#src_dense_reg_out=src_dense_reg(src_dense_out)
#trg_dense_reg_out=trg_dense_reg(trg_dense_out)
#...and cosine between the source and target side
merged_out=merge([src_dense_lin_out,trg_dense_lin_out],mode='cos',dot_axes=1)
flatten=Flatten()
merged_out_flat=flatten(merged_out)
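# Added note (inferred from the data iterator's positive/negative pairs): the
# 'cos' merge produces a single cosine-similarity score per sentence pair, and
# the MSE loss below trains that score toward the target given for each pair.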
model=Model(input=src_inp+trg_inp+[src_len_inp,trg_len_inp], output=merged_out_flat)
model.compile(optimizer='adam',loss='mse')
print(model.summary())
inf_iter=data_dense.InfiniteDataIterator(src_f_name,trg_f_name)
batch_iter=data_dense.fill_batch(minibatch_size,max_sent_len,vs,inf_iter,ngrams)
#dev iter
dev_batch_iter=data_dense.fill_batch(minibatch_size,max_sent_len,vs,data_dense.InfiniteDataIterator("data/all.dev.new.fi.tokenized","data/all.dev.new.en.tokenized"),ngrams)
# import pdb
# pdb.set_trace()
# save model json
model_json = model.to_json()
with open(model_name+".json", "w") as json_file:
json_file.write(model_json)
# callback to save weights after each epoch
save_cb=ModelCheckpoint(filepath=model_name+".h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
samples_per_epoch=math.ceil((2*len(inf_iter.data))/minibatch_size/20)*minibatch_size #2* because we also have the negative examples
model.fit_generator(batch_iter,samples_per_epoch,60,callbacks=[save_cb],validation_data=dev_batch_iter,nb_val_samples=1000)
#counter=1
#while True:
# matrix_dict,target=batch_iter.__next__()
# print("BATCH", counter, "LOSS",model.train_on_batch(matrix_dict,target),file=sys.stderr,flush=True)
# counter+=1
| apache-2.0 |
rahul-c1/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 20 | 3128 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
"""Test FactorAnalysis ability to recover the data covariance structure
"""
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
assert_warns(DeprecationWarning, FactorAnalysis, verbose=1)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
zaxtax/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
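# Hedged usage sketch (the ranges below are made-up values, not part of the
# original benchmark): compute_bench can also be driven directly, e.g.
#   results = compute_bench(np.array([100, 200]), np.array([100, 200]), rank=20)
# which returns a dict mapping each method label to a list of timings.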
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
nok/sklearn-porter | sklearn_porter/cli/__main__.py | 1 | 5881 | # -*- coding: utf-8 -*-
import sys
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from argparse import SUPPRESS
from argparse import _HelpAction
from os import sep
from os.path import isdir
from os.path import isfile
from sklearn.externals import joblib
from sklearn_porter import Porter
from sklearn_porter import meta
from sklearn_porter.language import *
def parse_args(args):
version = meta.get('version')
header = '''
#
### ### ### ### ### ###
# # # # # # ## #
### ### # ## ### # v{}
#'''.format(version)
summary = dict(
usage=header,
description=meta.get('description'),
epilog='More details on ' + meta.get('url'),
formatter_class=RawTextHelpFormatter,
add_help=False
)
p = ArgumentParser(**summary)
# Remove the default arguments group:
p._action_groups.pop()
# File arguments:
files = p.add_argument_group('File arguments')
help = 'Path to an exported estimator in pickle (.pkl) format.'
files.add_argument('input', help=help)
help = 'Path to the output directory where ' \
'the transpiled estimator will be stored.'
files.add_argument('--to', required=False, help=help)
# Template arguments:
templates = p.add_argument_group('Template arguments')
templates.add_argument('--class_name', default=None, required=False,
help='Define a custom class name.')
templates.add_argument('--method_name', default='predict', required=False,
help='Define a custom method name.')
# Optional arguments:
optional = p.add_argument_group('Optional arguments')
optional.add_argument('--export', '-e', required=False, default=False,
action='store_true', help='Whether to export '
'the model data or not.')
optional.add_argument('--checksum', '-s', required=False, default=False,
action='store_true', help='Whether to append the '
'checksum to the filename '
'or not.')
optional.add_argument('--data', '-d', required=False, default=False,
action='store_true', help='Whether to export just '
'the model data or all.')
optional.add_argument('--pipe', '-p', required=False, default=False,
action='store_true', help='Print the transpiled '
'estimator to the console.')
# Programming languages:
langs = p.add_argument_group('Programming languages')
languages = {key: clazz.LABEL for key, clazz in list(LANGUAGES.items())}
langs.add_argument('--language', '-l', choices=languages.keys(),
default='java', required=False, help=SUPPRESS)
for key, label in list(languages.items()):
help = 'Set \'{}\' as the target programming language.'.format(label)
langs.add_argument('--{}'.format(key), action='store_true', help=help)
# Extra arguments:
extras = p.add_argument_group('Extra arguments')
extras.add_argument('--version', '-v', action='version',
version='sklearn-porter v{}'.format(version),
help='Show the version number and exit.')
extras.add_argument('--help', '-h', action=_HelpAction,
help="Show this help message and exit.")
# Show help by default:
if len(sys.argv) == 1:
p.print_help(sys.stderr)
sys.exit(1)
# Return dictionary:
args = vars(p.parse_args(args))
return args
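# Hedged usage sketch (the pickle path and output directory are hypothetical;
# the flags mirror the parser defined above):
#
#   python -m sklearn_porter.cli estimator.pkl --to ./out --java --pipe
#
# would transpile the pickled estimator to Java and also print it to stdout.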
def main():
args = parse_args(sys.argv[1:])
# Check input data:
pkl_file_path = str(args.get('input'))
if not isfile(pkl_file_path):
exit_msg = 'No valid estimator in pickle ' \
'format was found at \'{}\'.'.format(pkl_file_path)
sys.exit('Error: {}'.format(exit_msg))
# Load data:
estimator = joblib.load(pkl_file_path)
# Determine the target programming language:
language = str(args.get('language')) # with default language
languages = ['c', 'java', 'js', 'go', 'php', 'ruby']
for key in languages:
if args.get(key): # found explicit assignment
language = key
break
# Define destination path:
dest_dir = str(args.get('to'))
if dest_dir == '' or not isdir(dest_dir):
dest_dir = pkl_file_path.split(sep)
del dest_dir[-1]
dest_dir = sep.join(dest_dir)
# Port estimator:
try:
class_name = args.get('class_name')
method_name = args.get('method_name')
with_export = bool(args.get('export'))
with_checksum = bool(args.get('checksum'))
porter = Porter(estimator, language=language)
output = porter.export(class_name=class_name, method_name=method_name,
export_dir=dest_dir, export_data=with_export,
export_append_checksum=with_checksum,
details=True)
except Exception as exception:
# Catch any exception and exit the process:
sys.exit('Error: {}'.format(str(exception)))
else:
# Print transpiled estimator to the console:
if bool(args.get('pipe', False)):
print(output.get('estimator'))
sys.exit(0)
only_data = bool(args.get('data'))
if not only_data:
filename = output.get('filename')
dest_path = dest_dir + sep + filename
# Save transpiled estimator:
with open(dest_path, 'w') as file_:
file_.write(output.get('estimator'))
if __name__ == "__main__":
main()
| mit |
marianotepper/dask | dask/array/core.py | 2 | 59930 | from __future__ import absolute_import, division, print_function
import operator
from operator import add, getitem
import inspect
from numbers import Number
from collections import Iterable
from bisect import bisect
from itertools import product, count
from collections import Iterator
from functools import partial, wraps
from toolz.curried import (pipe, partition, concat, unique, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
curry, reduce, interleave, sliding_window)
import numpy as np
from threading import Lock
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..utils import deepmap, ignoring, repr_long_list, concrete
from ..compatibility import unicode
from .. import threaded, core
from ..context import _globals
names = ('x_%d' % i for i in count(1))
tokens = ('-%d' % i for i in count(1))
def getarray(a, b, lock=None):
""" Mimics getitem but includes call to np.asarray
>>> getarray([1, 2, 3, 4, 5], slice(1, 4))
array([2, 3, 4])
"""
if lock:
lock.acquire()
try:
c = a[b]
if type(c) != np.ndarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
from .optimization import optimize
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]
shapes = product(*chunks)
starts = product(*cumdims)
return [tuple(slice(s, s+dim) for s, dim in zip(start, shape))
for start, shape in zip(starts, shapes)]
def getem(arr, chunks, shape=None):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
"""
chunks = normalize_chunks(chunks, shape)
keys = list(product([arr], *[range(len(bds)) for bds in chunks]))
values = [(getarray, arr, x) for x in slices_from_chunks(chunks)]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims)
for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yields an embarrassingly parallel communication pattern and is read as
z_i = func(x_i, y_i)
More complex patterns may emerge, including multiple indices
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs result in many
inputs being sent to one function (see examples).
Examples
--------
    Simple embarrassingly parallel map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
"""
numblocks = kwargs['numblocks']
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
args.append(tups2)
valtups.append(tuple(args))
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
vals = [(func,) + vt for vt in valtups]
return dict(zip(keys, vals))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def map_blocks(func, *arrs, **kwargs):
""" Map a function across all blocks of a dask array
You must also specify the chunks of the resulting array. If you don't then
we assume that the resulting array has the same block structure as the
input.
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If function changes shape of the blocks then please provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
Your block function can learn where in the array it is if it supports a
``block_id`` keyword argument. This will receive entries like (2, 0, 1),
the position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
"""
if not callable(func):
raise TypeError("First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)" %
type(func).__name__)
dtype = kwargs.get('dtype')
assert all(isinstance(arr, Array) for arr in arrs)
inds = [tuple(range(x.ndim))[::-1] for x in arrs]
args = list(concat(zip(arrs, inds)))
out_ind = tuple(range(max(x.ndim for x in arrs)))[::-1]
result = atop(func, out_ind, *args, dtype=dtype)
# If func has block_id as an argument then swap out func
# for func with block_id partialed in
try:
spec = inspect.getargspec(func)
except:
spec = None
if spec and 'block_id' in spec.args:
for k in core.flatten(result._keys()):
result.dask[k] = (partial(func, block_id=k[1:]),) + result.dask[k][1:]
# Assert user specified chunks
chunks = kwargs.get('chunks')
if chunks is not None and chunks and not isinstance(chunks[0], tuple):
chunks = tuple([nb * (bs,)
for nb, bs in zip(result.numblocks, chunks)])
if chunks is not None:
result.chunks = chunks
return result
@wraps(np.squeeze)
def squeeze(a, axis=None):
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
b = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
chunks = tuple(bd for bd in b.chunks if bd != (1,))
old_keys = list(product([b.name], *[range(len(bd)) for bd in b.chunks]))
new_keys = list(product([b.name], *[range(len(bd)) for bd in chunks]))
dsk = b.dask.copy()
for o, n in zip(old_keys, new_keys):
dsk[n] = dsk[o]
del dsk[o]
return Array(dsk, b.name, chunks, dtype=a.dtype)
def topk(k, x):
""" The top k elements of an array
Returns the k greatest elements of the array in sorted order. Only works
on arrays of a single dimension.
>>> x = np.array([5, 1, 3, 6])
>>> d = from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
Runs in near linear time, returns all results in a single chunk so
all k elements must fit in memory.
"""
if x.ndim != 1:
raise ValueError("Topk only works on arrays of one dimension")
name = next(names)
dsk = dict(((name, i), (chunk.topk, k, key))
for i, key in enumerate(x._keys()))
name2 = next(names)
dsk[(name2, 0)] = (getitem,
(np.sort, (np.concatenate, (list, list(dsk)))),
slice(-1, -k - 1, -1))
chunks = ((k,),)
return Array(merge(dsk, x.dask), name2, chunks, dtype=x.dtype)
def compute(*args, **kwargs):
""" Evaluate several dask arrays at once
The result of this function is always a tuple of numpy arrays. To evaluate
a single dask array into a numpy array, use ``myarray.compute()`` or simply
``np.array(myarray)``.
Examples
--------
>>> import dask.array as da
>>> d = da.ones((4, 4), chunks=(2, 2))
>>> a = d + 1 # two different dask arrays
>>> b = d + 2
>>> A, B = da.compute(a, b) # Compute both simultaneously
"""
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
results2 = tuple(concatenate3(x) if arg.shape else unpack_singleton(x)
for x, arg in zip(results, args))
return results2
def store(sources, targets, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or iterable of array-likes
These should support setitem syntax ``target[10:20] = ...``
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError("Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets)))
updates = [insert_to_ooc(tgt, src) for tgt, src in zip(targets, sources)]
dsk = merge([src.dask for src in sources] + updates)
keys = [key for u in updates for key in u]
get(dsk, keys, **kwargs)
def blockdims_from_blockshape(shape, blockshape):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
"""
if blockshape is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
return tuple((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
for d, bd in zip(shape, blockshape))
class Array(object):
""" Parallel Array
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
"""
__slots__ = 'dask', 'name', 'chunks', '_dtype'
def __init__(self, dask, name, chunks, dtype=None, shape=None):
self.dask = dask
self.name = name
self.chunks = normalize_chunks(chunks, shape)
if dtype is not None:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def _args(self):
return (self.dask, self.name, self.chunks, self.dtype)
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def shape(self):
return tuple(map(sum, self.chunks))
def __len__(self):
return sum(self.chunks[0])
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
@memoize(key=lambda args, kwargs: (id(args[0]), args[0].name, args[0].chunks))
def dtype(self):
if self._dtype is not None:
return self._dtype
if self.shape:
return self[(0,) * self.ndim].compute().dtype
else:
return self.compute().dtype
def __repr__(self):
chunks = '(' + ', '.join(map(repr_long_list, self.chunks)) + ')'
return ("dask.array<%s, shape=%s, chunks=%s, dtype=%s>" %
(self.name, self.shape, chunks, self._dtype))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return np.prod(self.shape)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
def _keys(self, *args):
if self.ndim == 0:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
return [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
return [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
@wraps(store)
def store(self, target, **kwargs):
return store([self], [target], **kwargs)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See also
--------
da.store
h5py.File.create_dataset
"""
import h5py
with h5py.File(filename) as f:
if 'chunks' not in kwargs:
kwargs['chunks'] = tuple([c[0] for c in self.chunks])
d = f.require_dataset(datapath, shape=self.shape, dtype=self.dtype, **kwargs)
slices = slices_from_chunks(self.chunks)
name = next(names)
dsk = dict(((name,) + t[1:], (write_hdf5_chunk, filename, datapath, slc, t))
for t, slc in zip(core.flatten(self._keys()), slices))
myget = kwargs.get('get', get)
myget(merge(dsk, self.dask), list(dsk.keys()))
@wraps(compute)
def compute(self, **kwargs):
result, = compute(self, **kwargs)
return result
def __int__(self):
return int(self.compute())
def __bool__(self):
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def __float__(self):
return float(self.compute())
def __complex__(self):
return complex(self.compute())
def __getitem__(self, index):
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
( isinstance(index, list)
and all(isinstance(i, (str, unicode)) for i in index))):
if self._dtype is not None and isinstance(index, (str, unicode)):
dt = self._dtype[index]
elif self._dtype is not None and isinstance(index, list):
dt = np.dtype([(name, self._dtype[name]) for name in index])
else:
dt = None
return elemwise(getarray, self, index, dtype=dt)
# Slicing
out = next(names)
if not isinstance(index, tuple):
index = (index,)
if all(isinstance(i, slice) and i == slice(None) for i in index):
return self
dsk, chunks = slice_array(out, self.name, self.chunks, index)
return Array(merge(self.dask, dsk), out, chunks, dtype=self._dtype)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other, axes=((self.ndim-1,), (other.ndim-2,)))
@property
def T(self):
return transpose(self)
@wraps(np.transpose)
def transpose(self, axes=None):
return transpose(self, axes)
@wraps(topk)
def topk(self, k):
return topk(k, self)
def astype(self, dtype, **kwargs):
""" Copy of the array, cast to a specified type """
return elemwise(partial(np.ndarray.astype, dtype=dtype, **kwargs),
self, dtype=dtype)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(np.any)
def any(self, axis=None, keepdims=False):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims)
@wraps(np.all)
def all(self, axis=None, keepdims=False):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims)
@wraps(np.min)
def min(self, axis=None, keepdims=False):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims)
@wraps(np.max)
def max(self, axis=None, keepdims=False):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims)
@wraps(np.argmin)
def argmin(self, axis=None):
from .reductions import argmin
return argmin(self, axis=axis)
@wraps(np.argmax)
def argmax(self, axis=None):
from .reductions import argmax
return argmax(self, axis=axis)
@wraps(np.sum)
def sum(self, axis=None, dtype=None, keepdims=False):
from .reductions import sum
return sum(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.prod)
def prod(self, axis=None, dtype=None, keepdims=False):
from .reductions import prod
return prod(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.mean)
def mean(self, axis=None, dtype=None, keepdims=False):
from .reductions import mean
return mean(self, axis=axis, dtype=dtype, keepdims=keepdims)
@wraps(np.std)
def std(self, axis=None, dtype=None, keepdims=False, ddof=0):
from .reductions import std
return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
@wraps(np.var)
def var(self, axis=None, dtype=None, keepdims=False, ddof=0):
from .reductions import var
return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
def vnorm(self, ord=None, axis=None, keepdims=False):
""" Vector norm """
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims)
@wraps(map_blocks)
def map_blocks(self, func, chunks=None, dtype=None):
return map_blocks(func, self, chunks=chunks, dtype=dtype)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
            The number of cells that each block should share with its
            neighbors. If a tuple or dict then this can be different per axis.
boundary: str
how to handle the boundaries. Values include 'reflect', 'periodic'
or any constant value like 0 or np.nan
trim: bool
Whether or not to trim the excess after the map function. Set this
to false if your mapping function does this for you.
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
"""
from .ghost import map_overlap
return map_overlap(self, func, depth, boundary, trim, **kwargs)
@wraps(squeeze)
def squeeze(self):
return squeeze(self)
def rechunk(self, chunks):
from .rechunk import rechunk
return rechunk(self, chunks)
def normalize_chunks(chunks, shape=None):
""" Normalize chunks to tuple of tuples
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6)) # Idempotent
((2, 2, 1), (2, 2, 2))
>>> normalize_chunks([[2, 2], [3, 3]]) # Cleans up lists to tuples
((2, 2), (3, 3))
>>> normalize_chunks(10, shape=(30, 5)) # Supports integer inputs
((10, 10, 10), (5,))
>>> normalize_chunks((), shape=(0, 0)) # respects null dimensions
((), ())
"""
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, Number):
chunks = (chunks,) * len(shape)
if not chunks:
if shape is None:
chunks = ()
else:
chunks = ((),) * len(shape)
if chunks and not isinstance(chunks[0], (tuple, list)):
chunks = blockdims_from_blockshape(shape, chunks)
chunks = tuple(map(tuple, chunks))
return chunks
def from_array(x, chunks, name=None, lock=False, **kwargs):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
The ``chunks`` argument must be one of the following forms:
- a blocksize like 1000
- a blockshape like (1000, 1000)
- explicit sizes of all blocks along all dimensions
like ((1000, 1000, 500), (400, 400)).
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
"""
chunks = normalize_chunks(chunks, x.shape)
name = name or next(names)
dsk = getem(name, chunks)
if lock is True:
lock = Lock()
if lock:
dsk = dict((k, v + (lock,)) for k, v in dsk.items())
return Array(merge({name: x}, dsk), name, chunks, dtype=x.dtype)
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Parameters
----------
func: callable
Function to apply to individual tuples of blocks
out_ind: iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
This is best explained through example. Consider the following examples:
Examples
--------
    2D embarrassingly parallel operation from two arrays, x and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij') # doctest: +SKIP
    The transpose case above is illustrative because it does the same
    transposition both on each in-memory block, by calling ``np.transpose``,
    and on the order of the blocks themselves, by switching the index order
    ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji') # doctest: +SKIP
    Any index, like ``i``, missing from the output index is interpreted as a
    contraction (note that this differs from the Einstein convention: repeated
    indices do not imply contraction). In the case of a contraction the passed
    function should expect an iterator of blocks from any array that holds
    that index.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i') # doctest: +SKIP
Many dask.array operations are special cases of atop. These tensor
operations cover a broad subset of NumPy and this function has been battle
tested, supporting tricky concepts like broadcasting.
See also:
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) or next(names)
dtype = kwargs.get('dtype', None)
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
shapes = dict((a.name, a.shape) for a, _ in arginds)
nameinds = [(a.name, i) for a, i in arginds]
dims = broadcast_dimensions(nameinds, shapes)
shape = tuple(dims[i] for i in out_ind)
blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
chunkss = broadcast_dimensions(nameinds, blockdim_dict)
chunks = tuple(chunkss[i] for i in out_ind)
dsks = [a.dask for a, _ in arginds]
return Array(merge(dsk, *dsks), out, chunks, dtype=dtype)
def get(dsk, keys, get=None, **kwargs):
""" Specialized get function
1. Handle inlining
2. Use custom score function
"""
get = get or _globals['get'] or threaded.get
dsk2 = optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
"""
while True:
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
stacked_names = ('stack-%d' % i for i in count(1))
def stack(seq, axis=0):
"""
Stack arrays along a new axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
assert len(set(a.chunks for a in seq)) == 1 # same chunks
shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
chunks = ( seq[0].chunks[:axis]
+ ((1,) * n,)
+ seq[0].chunks[axis:])
name = next(stacked_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
names = [a.name for a in seq]
inputs = [(names[key[axis+1]],) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
values = [(getarray, inp, (slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis))
for inp in inputs]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
concatenate_names = ('concatenate-%d' % i for i in count(1))
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
raise ValueError("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
bds = [a.chunks for a in seq]
if not all(len(set(bds[i][j] for i in range(n))) == 1
for j in range(len(bds[0])) if j != axis):
raise ValueError("Block shapes do not align")
shape = (seq[0].shape[:axis]
+ (sum(a.shape[axis] for a in seq),)
+ seq[0].shape[axis + 1:])
chunks = ( seq[0].chunks[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq[0].chunks[axis + 1:])
name = next(concatenate_names)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
names = [a.name for a in seq]
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[1:axis + 1]
+ (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis+1]) - 1],)
+ key[axis + 2:]
for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
if all(a._dtype is not None for a in seq):
dt = reduce(np.promote_types, [a._dtype for a in seq])
else:
dt = None
return Array(dsk2, name, chunks, dtype=dt)
@wraps(np.take)
def take(a, indices, axis):
if not -a.ndim <= axis < a.ndim:
raise ValueError('axis=(%s) out of bounds' % axis)
if axis < 0:
axis += a.ndim
return a[(slice(None),) * axis + (indices,)]
@wraps(np.transpose)
def transpose(a, axes=None):
axes = axes or tuple(range(a.ndim))[::-1]
return atop(curry(np.transpose, axes=axes),
axes,
a, tuple(range(a.ndim)), dtype=a._dtype)
@curry
def many(a, b, binop=None, reduction=None, **kwargs):
"""
    Apply a binary operator pairwise to two sequences, then reduce.
>>> many([1, 2, 3], [10, 20, 30], mul, sum) # dot product
140
"""
return reduction(map(curry(binop, **kwargs), a, b))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
if len(left_axes) > 1:
raise NotImplementedError("Simultaneous Contractions of multiple "
"indices not yet supported")
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
out_index.remove(left_index[l])
right_index[r] = left_index[l]
if lhs._dtype is not None and rhs._dtype is not None :
dt = np.promote_types(lhs._dtype, rhs._dtype)
else:
dt = None
func = many(binop=np.tensordot, reduction=sum,
axes=(left_axes, right_axes))
return atop(func,
out_index,
lhs, tuple(left_index),
rhs, tuple(right_index), dtype=dt)
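# Hedged usage sketch (not part of the original module; shapes and chunk sizes
# below are made up for illustration):
def _example_tensordot():
    """With ``axes=1`` the last axis of ``lhs`` is contracted against the
    first axis of ``rhs``, so 2-d inputs reduce to a matrix multiply.
    """
    x = from_array(np.arange(6).reshape(2, 3), chunks=(2, 3))
    y = from_array(np.arange(6).reshape(3, 2), chunks=(3, 2))
    # expected result: array([[10, 13], [28, 40]])
    return tensordot(x, y, axes=1).compute()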
def insert_to_ooc(out, arr):
lock = Lock()
def store(x, index):
with lock:
out[index] = np.asanyarray(x)
return None
slices = slices_from_chunks(arr.chunks)
name = 'store-%s' % arr.name
dsk = dict(((name,) + t[1:], (store, t, slc))
for t, slc in zip(core.flatten(arr._keys()), slices))
return dsk
def partial_by_order(op, other):
"""
>>> f = partial_by_order(add, [(1, 10)])
>>> f(5)
15
"""
def f(*args):
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return op(*args2)
return f
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See also
--------
atop
"""
name = kwargs.get('name') or next(names)
out_ndim = max(len(arg.shape) if isinstance(arg, Array) else 0
for arg in args)
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [arg for arg in args if isinstance(arg, Array)]
other = [(i, arg) for i, arg in enumerate(args) if not isinstance(arg, Array)]
if 'dtype' in kwargs:
dt = kwargs['dtype']
elif not all(a._dtype is not None for a in arrays):
dt = None
else:
vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
if hasattr(a, 'dtype') else a
for a in args]
try:
dt = op(*vals).dtype
except AttributeError:
dt = None
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
return atop(op2, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
dtype=dt, name=name)
def wrap_elemwise(func, **kwargs):
""" Wrap up numpy function into dask.array """
f = partial(elemwise, func, **kwargs)
f.__doc__ = func.__doc__
f.__name__ = func.__name__
return f
# ufuncs, copied from this page:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# math operations
logaddexp = wrap_elemwise(np.logaddexp)
logaddexp2 = wrap_elemwise(np.logaddexp2)
conj = wrap_elemwise(np.conj)
exp = wrap_elemwise(np.exp)
log = wrap_elemwise(np.log)
log2 = wrap_elemwise(np.log2)
log10 = wrap_elemwise(np.log10)
log1p = wrap_elemwise(np.log1p)
expm1 = wrap_elemwise(np.expm1)
sqrt = wrap_elemwise(np.sqrt)
square = wrap_elemwise(np.square)
# trigonometric functions
sin = wrap_elemwise(np.sin)
cos = wrap_elemwise(np.cos)
tan = wrap_elemwise(np.tan)
arcsin = wrap_elemwise(np.arcsin)
arccos = wrap_elemwise(np.arccos)
arctan = wrap_elemwise(np.arctan)
arctan2 = wrap_elemwise(np.arctan2)
hypot = wrap_elemwise(np.hypot)
sinh = wrap_elemwise(np.sinh)
cosh = wrap_elemwise(np.cosh)
tanh = wrap_elemwise(np.tanh)
arcsinh = wrap_elemwise(np.arcsinh)
arccosh = wrap_elemwise(np.arccosh)
arctanh = wrap_elemwise(np.arctanh)
deg2rad = wrap_elemwise(np.deg2rad)
rad2deg = wrap_elemwise(np.rad2deg)
# comparison functions
logical_and = wrap_elemwise(np.logical_and, dtype='bool')
logical_or = wrap_elemwise(np.logical_or, dtype='bool')
logical_xor = wrap_elemwise(np.logical_xor, dtype='bool')
logical_not = wrap_elemwise(np.logical_not, dtype='bool')
maximum = wrap_elemwise(np.maximum)
minimum = wrap_elemwise(np.minimum)
fmax = wrap_elemwise(np.fmax)
fmin = wrap_elemwise(np.fmin)
# floating functions
isreal = wrap_elemwise(np.isreal, dtype='bool')
iscomplex = wrap_elemwise(np.iscomplex, dtype='bool')
isfinite = wrap_elemwise(np.isfinite, dtype='bool')
isinf = wrap_elemwise(np.isinf, dtype='bool')
isnan = wrap_elemwise(np.isnan, dtype='bool')
signbit = wrap_elemwise(np.signbit, dtype='bool')
copysign = wrap_elemwise(np.copysign)
nextafter = wrap_elemwise(np.nextafter)
# modf: see below
ldexp = wrap_elemwise(np.ldexp)
# frexp: see below
fmod = wrap_elemwise(np.fmod)
floor = wrap_elemwise(np.floor)
ceil = wrap_elemwise(np.ceil)
trunc = wrap_elemwise(np.trunc)
# more math routines, from this page:
# http://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = wrap_elemwise(np.degrees)
radians = wrap_elemwise(np.radians)
rint = wrap_elemwise(np.rint)
fix = wrap_elemwise(np.fix)
angle = wrap_elemwise(np.angle)
real = wrap_elemwise(np.real)
imag = wrap_elemwise(np.imag)
clip = wrap_elemwise(np.clip)
fabs = wrap_elemwise(np.fabs)
sign = wrap_elemwise(np.sign)
def frexp(x):
tmp = elemwise(np.frexp, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.frexp(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
frexp.__doc__ = np.frexp.__doc__
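# Hedged usage sketch (not part of the original module; values are made up):
def _example_frexp():
    """``frexp`` mirrors ``np.frexp`` but returns a pair of lazy dask Arrays
    (mantissas, exponents) that share one underlying graph.
    """
    x = from_array(np.array([1.0, 2.0, 8.0]), chunks=3)
    mantissa, exponent = frexp(x)
    # expected: mantissa -> array([0.5, 0.5, 0.5]), exponent -> array([1, 2, 4])
    return mantissa.compute(), exponent.compute()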
def modf(x):
tmp = elemwise(np.modf, x)
left = next(names)
right = next(names)
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
if x._dtype is not None:
a = np.empty((1,), dtype=x._dtype)
l, r = np.modf(a)
ldt = l.dtype
rdt = r.dtype
else:
ldt = None
rdt = None
L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
dtype=ldt)
R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
dtype=rdt)
return L, R
modf.__doc__ = np.modf.__doc__
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def isnull(values):
""" pandas.isnull for dask arrays """
import pandas as pd
return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
if x is None or y is None:
raise TypeError(where_error_message)
return choose(condition, [y, x])
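# Hedged usage sketch (not part of the original module; values are made up):
def _example_where():
    """The supported three-argument form picks elementwise from ``x`` where
    the condition holds and from ``y`` elsewhere.
    """
    x = from_array(np.array([-1, 2, -3, 4]), chunks=2)
    # expected result: array([0, 2, 0, 4])
    return where(x > 0, x, 0).compute()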
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes):
if not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i]):
raise ValueError(
"Coarsening factor does not align with block dimensions")
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = next(names)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
for key in core.flatten(x._keys()))
chunks = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
if x._dtype is not None:
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
else:
dt = None
return Array(merge(x.dask, dsk), name, chunks, dtype=dt)
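# Hedged usage sketch (not part of the original module; shapes are made up):
def _example_coarsen():
    """Reduce non-overlapping 2x2 blocks with ``np.sum``, turning a (4, 4)
    array into a (2, 2) array. The coarsening factors must divide the chunk
    sizes, as enforced above.
    """
    x = from_array(np.arange(16).reshape(4, 4), chunks=(2, 2))
    # expected result: array([[10, 18], [42, 50]])
    return coarsen(np.sum, x, {0: 2, 1: 2}).compute()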
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
if not -arr.ndim <= axis < arr.ndim:
raise IndexError('axis %r is out of bounds for an array of dimension '
'%s' % (axis, arr.ndim))
if axis < 0:
axis += arr.ndim
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = next(names)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
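# Hedged usage sketch (not part of the original module; values are made up):
def _example_insert():
    """Insert a scalar before column 1 of every row, mirroring ``np.insert``
    for a monotonic ``obj`` argument.
    """
    x = from_array(np.arange(6).reshape(2, 3), chunks=(2, 3))
    # expected result: array([[ 0, -1,  1,  2],
    #                         [ 3, -1,  4,  5]])
    return insert(x, [1], -1, axis=1).compute()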
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
shape = tuple(shape)
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(new != old
for new, old in zip(shape[ndim_new:], x.shape)
if old != 1):
raise ValueError('cannot broadcast shape %s to shape %s'
% (x.shape, shape))
name = next(names)
chunks = (tuple((s,) for s in shape[:ndim_new])
+ tuple(bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape,
shape[ndim_new:])))
dsk = dict(((name,) + (0,) * ndim_new + key[1:],
(chunk.broadcast_to, key,
shape[:ndim_new] +
tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
for key in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, chunks, dtype=x.dtype)
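# Hedged usage sketch (not part of the original module; shapes are made up):
def _example_broadcast_to():
    """Broadcasting follows numpy rules, so a length-3 vector can be expanded
    to shape (2, 3) without copying the underlying blocks.
    """
    x = from_array(np.array([1, 2, 3]), chunks=3)
    # expected result: array([[1, 2, 3], [1, 2, 3]])
    return broadcast_to(x, (2, 3)).compute()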
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
fromfunction_names = ('fromfunction-%d' % i for i in count(1))
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
name = next(fromfunction_names)
if chunks:
chunks = normalize_chunks(chunks, shape)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))
shapes = list(product(*chunks))
values = [(np.fromfunction, offset_func(func, offset), shape)
for offset, shape in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype=dtype)
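# Hedged usage sketch (not part of the original module; shapes are made up):
def _example_fromfunction():
    """Each block calls ``np.fromfunction`` with its offset applied, so the
    result matches the in-memory equivalent.
    """
    x = fromfunction(lambda i, j: i + j, chunks=(2, 2), shape=(4, 4), dtype='f8')
    # expected result: array([[0., 1., 2., 3.],
    #                         [1., 2., 3., 4.],
    #                         [2., 3., 4., 5.],
    #                         [3., 4., 5., 6.]])
    return x.compute()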
@wraps(np.unique)
def unique(x):
name = next(names)
dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
parts = get(merge(dsk, x.dask), list(dsk.keys()))
return np.unique(np.concatenate(parts))
def write_hdf5_chunk(fn, datapath, index, data):
import h5py
with h5py.File(fn) as f:
d = f[datapath]
d[index] = data
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
name = 'bincount' + next(tokens)
if weights is not None:
dsk = dict(((name, i),
(np.bincount, (x.name, i), (weights.name, i), minlength))
for i, _ in enumerate(x._keys()))
dtype = 'f8'
else:
dsk = dict(((name, i),
(np.bincount, (x.name, i), None, minlength))
for i, _ in enumerate(x._keys()))
dtype = 'i8'
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum' + next(tokens)
dsk[(name, 0)] = (np.sum, (list, list(dsk)), 0)
chunks = ((minlength,),)
dsk.update(x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
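# Hedged usage sketch (not part of the original module; values are made up):
def _example_bincount():
    """``minlength`` is mandatory here (unlike ``np.bincount``) because the
    output shape must be known before execution.
    """
    x = from_array(np.array([0, 1, 1, 2, 2, 2]), chunks=3)
    # expected result: array([1, 2, 3, 0])
    return bincount(x, minlength=4).compute()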
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
"""
result = []
dim = 0
while isinstance(arrays, (list, tuple)):
result.append(tuple(deepfirst(a).shape[dim] for a in arrays))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
else:
return 1 + ndimlist(seq[0])
def concatenate3(arrays):
""" Recursive np.concatenate
    Input should be a nested list of numpy arrays arranged in the order in which
    they should appear in the output array. Each array should have the same number
    of dimensions as the desired output, and the depth of the nested lists should
    match that number of dimensions.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
arrays = concrete(arrays)
ndim = ndimlist(arrays)
if not ndim:
return arrays
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
result = np.empty(shape=shape, dtype=deepfirst(arrays).dtype)
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
| bsd-3-clause |
streettraffic/streettraffic | streettraffic/tests/test_utility.py | 1 | 4127 | from ..map_resource.utility import Utility
from .. import tools
import pandas as pd
settings = {
'app_id': 'F8aPRXcW3MmyUvQ8Z3J9',
'app_code' : 'IVp1_zoGHdLdz0GvD_Eqsw',
'map_tile_base_url': 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/',
'json_tile_base_url': 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?'
}
util = Utility(settings)
def test_get_tile():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/enterprise-map-tile/topics/key-concepts.html
"""
assert util.get_tile(52.525439, 13.38727, 12) == [2200, 1343]
def test_get_quadkeys():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/traffic/common/map_tile/topics/quadkeys.html
"""
assert util.get_quadkeys(35210, 21493, 16) == "1202102332221212"
def test_get_map_tile_resource():
assert util.get_map_tile_resource((33.670156, -84.325984),"latlon", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
assert util.get_map_tile_resource((4354, 6562),"colrow", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
def test_get_traffic_json_resource():
assert util.get_traffic_json_resource((34.9237, -82.4383), "latlon", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
assert util.get_traffic_json_resource((4440, 6493), "colrow", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
def test_get_area_tile_matrix():
df1 = pd.DataFrame([[(4350, 6557),(4351, 6557),(4352, 6557)],
[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df2 = pd.DataFrame([[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df3 = pd.DataFrame([[(4351, 6557),(4352, 6557)],
[(4351, 6558),(4352, 6558)],
[(4351, 6559),(4352, 6559)]])
assert df1.equals(util.get_area_tile_matrix([(33.766764, -84.409533), (33.740003, -84.368978)], 14))
assert df2.equals(util.get_area_tile_matrix([(33.741455, -84.397218), (33.744203, -84.369581)], 14)) # asymmetrical case 1
assert df3.equals(util.get_area_tile_matrix([(33.728999, -84.395856), (33.775902, -84.363917)], 14)) # asymmetrical case 2
def test_get_area_tile_matrix_url():
df = tools.load_data_object("test_data/get_area_tile_matrix_url() for map_tile.pkl")
cor1 = (33.766764, -84.409533)
cor2 = (33.740003, -84.368978)
info = util.get_area_tile_matrix([cor1, cor2], 14)
matrix = util.get_area_tile_matrix_url("map_tile", [cor1, cor2], 14)
assert df.equals(matrix)
def test_get_distance():
assert util.get_distance((33.70524,-84.40353), (33.71337,-84.39347)) == 1297.72758534478
def test_read_geojson_polygon():
assert util.read_geojson_polygon('{ "type": "FeatureCollection", "features": [ { "type": "Feature", "geometry": { "type": "Polygon", "coordinates": [ [ [ -84.39285278320312, 33.76266589608855 ], [ -84.3738842010498, 33.770015152780125 ], [ -84.3610954284668, 33.7613101391079 ], [ -84.37019348144531, 33.74468253332004 ], [ -84.38830375671387, 33.751391054166746 ], [ -84.39705848693848, 33.758384485188 ], [ -84.39285278320312, 33.76266589608855 ] ] ] }, "properties": {} } ] }') == [[33.76266589608855,-84.39285278320312],[33.770015152780125,-84.3738842010498],[33.7613101391079,-84.3610954284668],[33.74468253332004,-84.37019348144531],[33.751391054166746,-84.38830375671387],[33.758384485188,-84.39705848693848],[33.76266589608855,-84.39285278320312]] | mit |
mblondel/scikit-learn | sklearn/cross_validation.py | 7 | 65132 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
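# Hedged sketch (not part of the original module): the minimal contract a
# subclass of _PartitionIterator has to honour -- implement _iter_test_indices
# and the base class derives the boolean test masks and the complementary train
# indices. The class below is a toy illustration only.
class _EveryOtherSampleSplit(_PartitionIterator):
    """Toy CV iterator: even-indexed then odd-indexed samples as test sets."""
    def _iter_test_indices(self):
        yield np.arange(0, self.n, 2)
        yield np.arange(1, self.n, 2)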
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets from all the samples assigned to
    ``p`` different values of the labels, while the latter builds each test
    set from the samples assigned to a single label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
    will allow some samples to occur several times in each split. However
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if isinstance(train_size, numbers.Integral):
self.train_size = train_size
elif (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
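# Hedged sketch (illustrative only) of the two-stage scheme implemented by
# Bootstrap.__iter__ above: first a disjoint random partition of the indices,
# then resampling with replacement inside each side, so no index can appear in
# both the train and the test set of a given iteration. Arguments are made up.
def _demo_bootstrap_iteration(n=9, train_size=5, test_size=4, seed=0):
    rng = check_random_state(seed)
    permutation = rng.permutation(n)
    ind_train = permutation[:train_size]
    ind_test = permutation[train_size:train_size + test_size]
    # Resample with replacement within each side of the partition.
    train = ind_train[rng.randint(0, train_size, size=(train_size,))]
    test = ind_test[rng.randint(0, test_size, size=(test_size,))]
    return train, test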
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
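# Hedged sketch (illustrative only) of the per-class allocation performed in
# StratifiedShuffleSplit._iter_indices above: train slots are assigned by
# rounding n_train * p_i for each class, and test slots are capped by the
# remaining members of the class. The counts below are made up for the example.
def _demo_stratified_allocation(cls_count=np.array([6, 4]), n_train=5, n_test=5):
    p_i = cls_count / float(cls_count.sum())
    n_i = np.round(n_train * p_i).astype(int)
    t_i = np.minimum(cls_count - n_i, np.round(n_test * p_i).astype(int))
    return n_i, t_i  # -> (array([3, 2]), array([3, 2]))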
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold, indices=None):
super(PredefinedSplit, self).__init__(len(test_fold), indices)
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
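# Hedged usage sketch (not part of the original module): cross_val_predict
# stitches the per-fold predictions back into the original sample order, so the
# returned array lines up with X row for row. The estimator argument is assumed
# to implement the usual fit/predict API; the toy data below is made up.
def _demo_cross_val_predict(estimator):
    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)
    # KFold covers every sample exactly once, as required by cross_val_predict.
    return cross_val_predict(estimator, X, y, cv=KFold(len(y), n_folds=5))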
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
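# Hedged sketch: _check_is_partition accepts any reordering of range(n) and
# rejects index sets with duplicates or gaps, which is exactly the property
# cross_val_predict needs from its CV generator.
def _demo_check_is_partition():
    assert _check_is_partition(np.array([2, 0, 1, 3]), 4)      # a permutation
    assert not _check_is_partition(np.array([0, 0, 1, 2]), 4)  # duplicate, gap at 3
    return True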
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
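# Hedged sketch (illustrative only) of the np.ix_ based slicing used in
# _safe_split above for precomputed kernels: the training block is square,
# while the test block must be K[test, train] so the estimator sees
# similarities between the test samples and the training set.
def _demo_kernel_slicing():
    K = np.arange(16).reshape(4, 4)
    train, test = np.array([0, 1, 2]), np.array([3])
    return K[np.ix_(train, train)], K[np.ix_(test, train)]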
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
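# Hedged sketch (illustrative only): with labels given, _shuffle only permutes
# values inside each label group, which is what keeps the permutation test
# valid when samples are grouped. The data below is made up.
def _demo_label_constrained_shuffle(seed=0):
    rng = check_random_state(seed)
    y = np.array([10, 11, 20, 21])
    labels = np.array([0, 0, 1, 1])
    # The first two values stay within {10, 11}, the last two within {20, 21}.
    return _shuffle(y, labels, rng)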
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another object that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
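# Hedged sketch of the p-value estimate used in permutation_test_score above:
# the +1 terms follow Ojala and Garriga (2010) so the estimate is never exactly
# zero. The scores below are made up for the example.
def _demo_permutation_pvalue():
    score = 0.9
    permutation_scores = np.array([0.50, 0.55, 0.91, 0.60])
    n_permutations = len(permutation_scores)
    # (1 permutation score >= 0.9, plus one) / (4 + 1) == 0.4
    return (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)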
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
chrsrds/scikit-learn | examples/gaussian_process/plot_gpc.py | 6 | 3993 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure()
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data",
edgecolors=(0, 0, 0))
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data",
edgecolors=(0, 0, 0))
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure()
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
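# log_marginal_likelihood expects the kernel hyperparameters in log-space
# (the kernel's theta), hence np.log of the grid values below.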
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 18 | 2444 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
thunderhoser/GewitterGefahr | gewittergefahr/dissertation/plot_overfitting_graph.py | 1 | 2781 | """Plots graph to explain overfitting."""
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
ORIG_LOSS = 2.
TRAINING_DECAY_RATE = 1. / 25
FIRST_VALIDN_DECAY_RATE = 1. / 35
SECOND_VALIDN_DECAY_RATE = 1. / 140
SECOND_VALIDN_START_EPOCH = 250
CUTOFF_EPOCH = 50
MAX_EPOCH_TO_PLOT = 75
MAIN_LINE_WIDTH = 4
REFERENCE_LINE_WIDTH = 2
REFERENCE_LINE_COLOUR = numpy.full(3, 152. / 255)
TRAINING_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
VALIDATION_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
FIGURE_WIDTH_INCHES = FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 600
OUTPUT_FILE_NAME = (
'/localdata/ryan.lagerquist/eager/dissertation_figures/'
'overfitting_graph.jpg'
)
FONT_SIZE = 30
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
def _run():
"""Plots schema to explain overfitting.
This is effectively the main method.
"""
epoch_indices = numpy.linspace(
0, MAX_EPOCH_TO_PLOT, num=MAX_EPOCH_TO_PLOT + 1, dtype=int
)
training_losses = ORIG_LOSS * numpy.exp(
-epoch_indices * TRAINING_DECAY_RATE
)
validation_losses = ORIG_LOSS * numpy.exp(
-epoch_indices * FIRST_VALIDN_DECAY_RATE
)
second_validation_losses = ORIG_LOSS * numpy.exp(
-(SECOND_VALIDN_START_EPOCH - epoch_indices) * SECOND_VALIDN_DECAY_RATE
)
validation_losses[CUTOFF_EPOCH:] = second_validation_losses[CUTOFF_EPOCH:]
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
axes_object.plot(
epoch_indices, training_losses, linewidth=MAIN_LINE_WIDTH,
color=TRAINING_COLOUR, label='Training')
axes_object.plot(
epoch_indices, validation_losses, linewidth=MAIN_LINE_WIDTH,
color=VALIDATION_COLOUR, label='Validation')
y_limits = numpy.array([0, ORIG_LOSS])
axes_object.set_ylim(y_limits)
axes_object.set_xlim([0, MAX_EPOCH_TO_PLOT])
axes_object.plot(
numpy.full(2, CUTOFF_EPOCH), y_limits, linestyle='dashed',
linewidth=REFERENCE_LINE_WIDTH, color=REFERENCE_LINE_COLOUR,
label='Overfitting starts here'
)
axes_object.set_xlabel('Epoch')
axes_object.set_ylabel('Loss')
axes_object.legend(loc='lower left')
print('Saving figure to: "{0:s}"...'.format(OUTPUT_FILE_NAME))
figure_object.savefig(
OUTPUT_FILE_NAME, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
if __name__ == '__main__':
_run()
| mit |
xubenben/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
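For intuition, fitting KRR amounts to a single linear solve in kernel space.
A minimal sketch, assuming an RBF kernel, a regularization strength ``lam``
and placeholder arrays ``X_train``, ``y_train``, ``X_test`` (illustrative
only, not the library implementation)::
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel
    K = rbf_kernel(X_train, X_train, gamma=10)                  # Gram matrix
    alpha = np.linalg.solve(K + lam * np.eye(len(K)), y_train)  # dual coefs
    y_pred = rbf_kernel(X_test, X_train, gamma=10).dot(alpha)   # predictions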
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_scale.py | 7 | 1102 | from __future__ import print_function
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
import io
@image_comparison(baseline_images=['log_scales'], remove_text=True)
def test_log_scales():
ax = plt.subplot(122, yscale='log', xscale='symlog')
ax.axvline(24.1)
ax.axhline(24.1)
@image_comparison(baseline_images=['logit_scales'], remove_text=True,
extensions=['png'])
def test_logit_scales():
ax = plt.subplot(111, xscale='logit')
# Typical extinction curve for logit
x = np.array([0.001, 0.003, 0.01, 0.03, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.97, 0.99, 0.997, 0.999])
y = 1.0 / x
ax.plot(x, y)
ax.grid(True)
@cleanup
def test_log_scatter():
"""Issue #1799"""
fig, ax = plt.subplots(1)
x = np.arange(10)
y = np.arange(10) - 1
ax.scatter(x, y)
buf = io.BytesIO()
fig.savefig(buf, format='pdf')
buf = io.BytesIO()
fig.savefig(buf, format='eps')
buf = io.BytesIO()
fig.savefig(buf, format='svg')
| apache-2.0 |
shenzebang/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
        (2 is enough to get close to the final solution; in practice it never
        needs to exceed 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets of the
    data before pooling them into larger subsets, and finally into the full
    data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of the observations, computed from the raw
        robust estimates of location and covariance.
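    Examples
    --------
    A minimal usage sketch (illustrative only, not run as a doctest; assumes
    mostly Gaussian data with a few contaminated rows)::
        import numpy as np
        rng = np.random.RandomState(0)
        X = rng.randn(100, 2)
        X[:10] += 10.  # shift ten observations far from the bulk
        location, covariance, support, dist = fast_mcd(X, random_state=rng)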
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is approximately zero but
        not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
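    Examples
    --------
    A minimal usage sketch (illustrative only, not run as a doctest)::
        import numpy as np
        from sklearn.covariance import MinCovDet
        rng = np.random.RandomState(0)
        X = rng.randn(100, 2)
        X[:5] += 8.  # a handful of outlying rows
        mcd = MinCovDet(random_state=0).fit(X)
        robust_location = mcd.location_
        robust_covariance = mcd.covariance_
        inlier_mask = mcd.support_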
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
zeyuanxy/fast-rcnn | lib/roi_data_layer/minibatch.py | 44 | 7337 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob,
'rois': rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
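    Example (illustrative): with num_classes = 3, a target row
    [2, dx, dy, dw, dh] fills columns 8:12 of bbox_targets with the four
    regression targets and sets the matching four loss weights to 1.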
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
pepincho/Python-Course-FMI | 02_challenge/render.py | 2 | 1718 | import argparse
from PIL import Image
from matplotlib import pyplot
import solution
parser = argparse.ArgumentParser(description='Visualize your homework.')
parser.add_argument('file', type=str, help="JPEG file to manipulate")
parser.add_argument('operation',
choices=['create_histogram', 'lighten', 'darken',
'invert', 'rotate_left', 'rotate_right'],
help="Operation to be executed on given image.")
parser.add_argument('args', default=[], nargs="*", type=float,
help="Opearion arguments")
args = parser.parse_args()
image = Image.open(args.file)
pixels = list(image.getdata())
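# Rebuild the flat pixel sequence into rows (height x width) so the solution
# functions can index pixels as picture[row][column].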
picture = []
for h in range(image.size[1]):
picture.append(pixels[(h*image.size[0]):((h+1)*image.size[0])])
try:
operation_result = getattr(solution, args.operation)(picture, *args.args)
filename = '{}_{}.jpg'.format(args.file.split('.')[0], args.operation)
except Exception as exc:
print("There's something wrong with your implementation of "
"{0}()!\n".format(args.operation))
raise exc
if args.operation == 'create_histogram':
for color, histogram in operation_result.items():
pyplot.bar(histogram.keys(), histogram.values(), alpha=0.6,
color=color)
pyplot.savefig(filename)
else:
new_pixels = [pixel for row in operation_result for pixel in row]
new_image = None
if args.operation in ('rotate_left', 'rotate_right'):
new_image = Image.new('RGB', (image.size[1], image.size[0]))
else:
new_image = Image.new('RGB', image.size)
new_image.putdata(new_pixels)
new_image.save(filename, 'JPEG')
print("File saved as {}".format(filename))
| mit |
bigdataelephants/scikit-learn | sklearn/manifold/t_sne.py | 4 | 19903 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
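    # Symmetrise the conditionals into joint probabilities: p_ij is
    # proportional to (p_j|i + p_i|j), obtained by adding the transpose and
    # dividing by the grand total of the matrix.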
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
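    # The next four lines turn the condensed squared distances d_ij^2 into
    # unnormalised Student-t affinities ((1 + d_ij^2) / alpha)**(-(alpha + 1) / 2);
    # Q then rescales them into a probability distribution.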
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=[]):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
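        # Per-parameter adaptive gains (delta-bar-delta style): each gain is
        # nudged up or decayed depending on whether the new gradient agrees
        # in sign with the previous update, then clipped at min_gain.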
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i+1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
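# Illustrative sketch (not part of the original module; the demo name is
# hypothetical): scoring a trivial embedding of random data with the
# trustworthiness function above. Keeping only the two leading coordinates
# of a random design should already score well above chance.
def _demo_trustworthiness():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = X[:, :2]
    return trustworthiness(X, X_embedded, n_neighbors=5)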
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
    [-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def _fit(self, X):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = self.n_components - 1.0
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self._fit(X)
return self.embedding_
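# Illustrative sketch (hypothetical demo, not part of the original module):
# embedding a small random dataset with the TSNE estimator above and scoring
# the result with trustworthiness. The perplexity is lowered to suit the
# small sample size.
def _demo_tsne():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    model = TSNE(n_components=2, perplexity=10.0, random_state=0)
    X_embedded = model.fit_transform(X)
    return trustworthiness(X, X_embedded, n_neighbors=5)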
| bsd-3-clause |
frank-tancf/scikit-learn | sklearn/decomposition/dict_learning.py | 42 | 46134 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
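# Illustrative sketch (hypothetical demo, not part of the original module):
# encoding a few random samples against a random, row-normalized dictionary
# with orthogonal matching pursuit via the sparse_encode function above.
def _demo_sparse_encode():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(15, 20)              # (n_components, n_features)
    dictionary /= np.sqrt(np.sum(dictionary ** 2, axis=1))[:, np.newaxis]
    X = rng.randn(5, 20)                        # (n_samples, n_features)
    code = sparse_encode(X, dictionary, algorithm='omp', n_nonzero_coefs=3)
    return code.shape                           # expected (5, 15)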
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
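# Illustrative sketch (hypothetical demo, not part of the original module):
# a single in-place dictionary update on random factors, checking that the
# returned atoms come back with (approximately) unit norm.
def _demo_update_dict():
    rng = np.random.RandomState(0)
    Y = rng.randn(10, 30)                       # (n_features, n_samples)
    code = rng.randn(6, 30)                     # (n_components, n_samples)
    dictionary = rng.randn(10, 6)               # (n_features, n_components)
    dictionary = _update_dict(dictionary, Y, code, random_state=rng)
    return np.sum(dictionary ** 2, axis=0)      # each entry close to 1.0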
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
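# Illustrative sketch (hypothetical demo, not part of the original module):
# running the batch solver above on random data and reporting the shapes of
# the factors plus the final value of the objective.
def _demo_dict_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
                                             max_iter=20, method='lars',
                                             random_state=rng)
    return code.shape, dictionary.shape, errors[-1]   # (20, 5), (5, 10), ...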
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
        Callable that gets invoked at the end of every iteration.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
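# Illustrative sketch (hypothetical demo, not part of the original module):
# the online solver above on the same kind of random data, returning both
# the sparse code and the learned dictionary.
def _demo_dict_learning_online():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
                                            n_iter=25, batch_size=4,
                                            random_state=rng)
    return code.shape, dictionary.shape         # expected (20, 5), (5, 10)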
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
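# Illustrative sketch (hypothetical demo, not part of the original module):
# wrapping a fixed, row-normalized random dictionary in a SparseCoder and
# transforming new samples with the lasso_lars algorithm.
def _demo_sparse_coder():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(8, 12)
    dictionary /= np.sqrt(np.sum(dictionary ** 2, axis=1))[:, np.newaxis]
    coder = SparseCoder(dictionary, transform_algorithm='lasso_lars',
                        transform_alpha=0.1)
    X = rng.randn(4, 12)
    return coder.transform(X).shape             # expected (4, 8)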
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
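# Illustrative sketch (hypothetical demo, not part of the original module):
# fitting the DictionaryLearning estimator above on random data and encoding
# the same data with the default omp transform.
def _demo_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 16)
    dico = DictionaryLearning(n_components=8, alpha=1., max_iter=10,
                              random_state=rng)
    code = dico.fit(X).transform(X)
    return dico.components_.shape, code.shape   # (8, 16), (30, 8)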
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
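# Illustrative sketch (hypothetical demo, not part of the original module):
# incremental fitting with partial_fit on successive random mini-batches,
# relying on the internal (A, B) statistics kept between calls.
def _demo_minibatch_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=8, alpha=1., n_iter=5,
                                       random_state=rng)
    for _ in range(10):
        dico.partial_fit(rng.randn(4, 16))
    return dico.components_.shape               # expected (8, 16)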
| bsd-3-clause |
jart/tensorflow | tensorflow/contrib/learn/__init__.py | 17 | 2736 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning (DEPRECATED).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_Oreardata.py | 1 | 4356 | #!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Program to fit straight line parameters
# to data with errors in both coordinates.
# Use data from Orear's article
# Vog, 10 Dec, 2011
#
# The data is from a real physics experiment. Orear,
# Am. J.Phys., Vol.50, No. 10, October 1982, lists values
# for a and b that are not comparable to what we find with
# kmpfit. In an erratum
# Am. J.Phys., Vol.52, No. 3, March 1984, he published new
# values that are the same as we find with kmpfit. This
# is after an improvement of the minimalization of the
# objective function where the parameters and the weights
# are iterated together rather than alternately.
# The literature values are printed as output of this script.
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from kapteyn import kmpfit
def model(p, x):
a, b = p
return a*x - b/x
def residuals(p, data):
# Residuals function for data with errors in both coordinates
a, b = p
x, y, ex, ey = data
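    # Effective-variance weighting: the x-error is propagated through the
    # model slope df/dx = a + b/x**2, so w approximates
    # var(y) + (df/dx)**2 * var(x) for each data point.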
w = ey*ey + ex*ex*(a+b/x**2)**2
wi = numpy.sqrt(numpy.where(w==0.0, 0.0, 1.0/(w)))
d = wi*(y-model(p,x))
return d
def residuals2(p, data):
# Residuals function for data with errors in y only
a, b = p
x, y, ey = data
wi = numpy.where(ey==0.0, 0.0, 1.0/ey)
d = wi*(y-model(p,x))
return d
def residuals3(p, data):
# Minimum distance formula with expression for x_model
a, b = p
x, y, ex, ey = data
wx = numpy.where(ex==0.0, 0.0, 1.0/(ex*ex))
wy = numpy.where(ey==0.0, 0.0, 1.0/(ey*ey))
df = a + b/(x*x)
    # Calculate the approximate values for the model
x0 = x + (wy*(y-model(p,x))*df)/(wx+wy*df*df)
y0 = model(p,x0)
D = numpy.sqrt( wx*(x-x0)**2+wy*(y-y0)**2 )
return D
# Create the data
N = 20
beta0 = [0.1,650000] # Initial estimates
y = numpy.array([-4.017, -2.742, -1.1478, 1.491, 6.873])
x = numpy.array([22000.0, 22930, 23880, 25130, 26390])
N = len(y)
errx = numpy.array([440.0, 470, 500, 530, 540])
erry = numpy.array([0.5, 0.25, 0.08, 0.09, 1.90])
print("\Literature values:")
print("===================")
print("Orear's iteration method: a, b, min chi^2:", 1.0163e-3, 5.937e5, 2.187)
print("Orear's exact method: a, b, min chi^2:", 1.0731e-3, 6.250e5, 2.134)
# Prepare fit routine
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errx, erry))
fitobj.fit(params0=beta0)
print("\n\n======== Results kmpfit: weights for both coordinates =========")
print("Params: ", fitobj.params)
print("Covariance errors: ", fitobj.xerror)
print("Standard errors ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Iterations: ", fitobj.niter)
print("Status: ", fitobj.message)
# Prepare fit routine
fitobj2 = kmpfit.Fitter(residuals=residuals2, data=(x, y, erry))
fitobj2.fit(params0=beta0)
print("\n\n======== Results kmpfit errors in Y only =========")
print("Params: ", fitobj2.params)
print("Covariance errors: ", fitobj2.xerror)
print("Standard errors ", fitobj2.stderr)
print("Chi^2 min: ", fitobj2.chi2_min)
print("Reduced Chi^2: ", fitobj2.rchi2_min)
# Prepare fit routine
fitobj3 = kmpfit.Fitter(residuals=residuals3, data=(x, y, errx, erry))
fitobj3.fit(params0=beta0)
print("\n\n======== Results kmpfit with distance formula =========")
print("Params: ", fitobj3.params)
print("Covariance errors: ", fitobj3.xerror)
print("Standard errors ", fitobj3.stderr)
print("Chi^2 min: ", fitobj3.chi2_min)
print("Reduced Chi^2: ", fitobj3.rchi2_min)
print("Iterations: ", fitobj3.niter)
print("Status: ", fitobj3.message)
# Some plotting
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure(1)
frame = fig.add_subplot(1,1,1)
frame.errorbar(x, y, xerr=errx, yerr=erry, fmt='bo')
frame.plot(x, model(fitobj.params,x), 'c', ls='--', lw=2, label="kmpfit errors in x and y")
frame.plot(x, model(fitobj2.params,x), 'g', label="kmpfit errors in y only")
frame.set_xlabel("X")
frame.set_ylabel("Y")
frame.set_title(r"$\mathrm{Orear's\ data\ and\ model:\ } y=a*x - b/x$")
leg = frame.legend(loc=2)
show() | bsd-3-clause |
tosolveit/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, note that glmnet divides the
    # objective by n_obs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
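# Note: with the default arguments this yields 50 samples and 200 features,
# of which only the first 10 carry signal, hence "ill-posed" above.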
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in an
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overridden_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
mhdella/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
EnvGen/toolbox | scripts/raw_counts.py | 1 | 1863 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
"""A script to combine raw counts for contigs or genes
"""
import sys
import argparse
import pandas as pd
import re
def gene_lengths_from_gff(gff_file):
gene_id_regex = re.compile('ID=([a-zA-Z_\-0-9]*);')
gene_lengths = {}
with open(gff_file) as fh:
for line in fh:
gene_id = gene_id_regex.findall(line)[0]
gene_lengths[gene_id] = abs(int(line.split(' ')[4]) - int(line.split(' ')[3])) + 1
return pd.Series(gene_lengths)
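# Note: the parser above assumes space-separated GFF fields (so columns 4 and
# 5, i.e. split indices 3 and 4, hold start and end) and an attribute string
# containing 'ID=...;'.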
def main(args):
gene_lengths = gene_lengths_from_gff(args.gff)
df = None
for fn, sample_name in zip(args.coverage_files, args.sample_names):
count_df = pd.read_table(fn, index_col=0, header=None,
names=['gene_id', sample_name], compression=args.input_compression)
if df is None:
df = count_df
else:
df[sample_name] = count_df[sample_name]
df['gene_length'] = gene_lengths
df[['gene_length'] + list(args.sample_names)].to_csv(sys.stdout, sep='\t')
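# Illustrative invocation (hypothetical file names):
#   python raw_counts.py --sample_names s1 s2 \
#       --coverage_files s1_counts.tsv.gz s2_counts.tsv.gz \
#       --gff genes.gff --input_compression gzip > raw_counts.tsv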
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = __doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--sample_names', nargs='*',
help=("Sample names, in the same order as coverage_files"))
parser.add_argument('--coverage_files', nargs='*',
help=("Coverage files with tab separated values: "
"sequence id, average coverage, sequence length"))
parser.add_argument('--gff',
help=("GFF version 2 file"))
parser.add_argument("--input_compression", default=None, choices=[None, 'gzip'],
help="Compression type for input coverage files. Default=None, use 'gzip', for gzipped files.")
args = parser.parse_args()
main(args)
| mit |
bhargav/scikit-learn | sklearn/covariance/tests/test_covariance.py | 79 | 12193 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
# A simple implementation of the formulas from Ledoit & Wolf
    # The computation below follows the formulas given in
# "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
# Large-Dimensional Covariance Matrices"
# beta and delta are given in the beginning of section 3.2
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=False)
mu = np.trace(emp_cov) / n_features
delta_ = emp_cov.copy()
delta_.flat[::n_features + 1] -= mu
delta = (delta_ ** 2).sum() / n_features
X2 = X ** 2
beta_ = 1. / (n_features * n_samples) \
* np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
beta = min(beta_, delta)
shrinkage = beta / delta
return shrinkage
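# In symbols, the helper above computes, for the empirical covariance S
# (p x p) estimated from n samples:
#     mu = trace(S) / p
#     delta = ||S - mu * I||_F**2 / p
#     beta_ = sum(X2.T @ X2 / n - S**2) / (n * p)      with X2 = X**2
#     shrinkage = min(beta_, delta) / delta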
def test_ledoit_wolf_small():
# Compare our blocked implementation to the naive implementation
X_small = X[:, :4]
lw = LedoitWolf()
lw.fit(X_small)
shrinkage_ = lw.shrinkage_
assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
diegoaurino/numerical_python | sets_relationships_visualizer/sets_relationships_visualizer/sets_relationships_visualizer.py | 1 | 1163 | #!/usr/bin/env python
"""
Project: sets_relationships_visualizer
Description: A small program that creates a Venn diagram to show
the relationship between two sets of questions (yes[1] or no[0]).
Author: Diego Aurino da Silva
Author URI: http://diegoaurino.info/
Default repository: https://github.com/diegoaurino/numerical_python
Version: NA
License: MIT
License URI: https://github.com/diegoaurino/numerical_python/blob/master/LICENSE
Copyright © 2017 Diego Aurino
"""
import sys, csv, matplotlib, matplotlib_venn, sympy
def sets_relationships_visualizer(filename):
question_one, question_two = list(), list()
for answer in csv.reader(open(filename)):
if answer[1] == '1': question_one.append(answer[0])
if answer[2] == '1': question_two.append(answer[0])
matplotlib_venn.venn2(subsets=[sympy.FiniteSet(*question_one),
sympy.FiniteSet(*question_two)],
set_labels=('This', 'That'))
matplotlib.pyplot.show()
if __name__ == "__main__":
sys.exit(int(
sets_relationships_visualizer(input('Please, insert the csv file name: ')) or 0)) | mit |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e07_suicide_weather/test/test_suicide_weather.py | 1 | 2848 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch, MagicMock
import pandas as pd
from tmc import points
from tmc.utils import load, get_out, patch_helper, spy_decorator
module_name="src.suicide_weather"
suicide_weather = load(module_name, "suicide_weather")
suicide_fractions = load(module_name, "suicide_fractions")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p05-07.1')
class SuicideWeather(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.tup = suicide_weather()
def setUp(self):
self.tup = suicide_weather()
def test_return_value(self):
suicide_n, temperature_n, common_n, corr = self.tup
self.assertEqual(suicide_n, 141, msg="Incorrect size of suicide Series!")
self.assertEqual(temperature_n, 191, msg="Incorrect size of temperature Series!")
self.assertEqual(common_n, 108, msg="Incorrect size of common Series!")
self.assertAlmostEqual(corr, -0.5580402318136322, places=4,
msg="Incorrect Spearman correlation!")
def test_calls(self):
method = spy_decorator(pd.core.series.Series.corr, "corr")
f_method = spy_decorator(pd.core.frame.DataFrame.corr, "corr")
with patch(ph("suicide_fractions"), wraps=suicide_fractions) as psf,\
patch(ph("pd.read_html"), wraps=pd.read_html) as phtml,\
patch.object(pd.core.series.Series, "corr", new=method),\
patch.object(pd.core.frame.DataFrame, "corr", new=f_method),\
patch(ph("suicide_weather"), wraps=suicide_weather) as psw:
main()
psf.assert_called_once()
psw.assert_called_once()
phtml.assert_called_once()
if not f_method.mock.called:
method.mock.assert_called()
args, kwargs = method.mock.call_args
else:
args, kwargs = f_method.mock.call_args
correct = ((len(args) > 1 and args[1]== "spearman") or
("method" in kwargs and kwargs["method"] == "spearman"))
self.assertTrue(correct, msg="You did not compute Spearman correlation!")
out = get_out()
self.assertRegex(out, r"Suicide DataFrame has \d+ rows",
msg="Output line about Suicide was incorrect!")
self.assertRegex(out, r"Temperature DataFrame has \d+ rows",
msg="Output line about Temperature was incorrect!")
self.assertRegex(out, r"Common DataFrame has \d+ rows",
msg="Output line about Common was incorrect!")
self.assertRegex(out, r"Spearman correlation:\s+[+-]?\d+\.\d+",
msg="Output line about correlation was incorrect!")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
UNR-AERIAL/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
oemof/examples | oemof_examples/oemof.solph/v0.2.x/sdewes_paper_2017/economic_dispatch.py | 2 | 6046 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example from the SDEWES conference paper:
Simon Hilpert, Cord Kaldemeyer, Uwe Krien, Stephan Günther (2017).
'Solph - An Open Multi Purpose Optimisation Library for Flexible
Energy System Analysis'. Paper presented at SDEWES Conference,
Dubrovnik.
Data
----
timeseries.csv
Installation requirements
-------------------------
This example requires the latest version of oemof and others. Install by:
pip install oemof matplotlib networkx pygraphviz
"""
import os
import pandas as pd
import networkx as nx
from matplotlib import pyplot as plt
from oemof.network import Node
from oemof.outputlib import processing
from oemof.solph import (EnergySystem, Bus, Source, Sink, Flow, NonConvex,
Model, Transformer, components)
from oemof.graph import create_nx_graph as create_graph
def draw_graph(grph, edge_labels=True, node_color='#AFAFAF',
edge_color='#CFCFCF', plot=True, node_size=2000,
with_labels=True, arrows=True, layout='neato'):
"""
Draw a graph. This function will be removed in future versions.
Parameters
----------
grph : networkxGraph
A graph to draw.
edge_labels : boolean
Use nominal values of flow as edge label
node_color : dict or string
        Hex color code or matplotlib color for each node. If string, all
colors are the same.
edge_color : string
        Hex color code or matplotlib color for edge color.
plot : boolean
Show matplotlib plot.
node_size : integer
Size of nodes.
with_labels : boolean
Draw node labels.
arrows : boolean
Draw arrows on directed edges. Works only if an optimization_model has
been passed.
layout : string
networkx graph layout, one of: neato, dot, twopi, circo, fdp, sfdp.
"""
if type(node_color) is dict:
node_color = [node_color.get(g, '#AFAFAF') for g in grph.nodes()]
# set drawing options
options = {
'prog': 'dot',
'with_labels': with_labels,
'node_color': node_color,
'edge_color': edge_color,
'node_size': node_size,
'arrows': arrows
}
# draw graph
pos = nx.drawing.nx_agraph.graphviz_layout(grph, prog=layout)
nx.draw(grph, pos=pos, **options)
# add edge labels for all edges
if edge_labels is True and plt:
labels = nx.get_edge_attributes(grph, 'weight')
nx.draw_networkx_edge_labels(grph, pos=pos, edge_labels=labels)
# show output
if plot is True:
plt.show()
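# Example call (mirroring the one at the bottom of this script):
#   draw_graph(create_graph(energysystem, m), plot=True, layout='neato',
#              node_size=3000, node_color={'bel': '#7EC0EE'})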
timeindex = pd.date_range('1/1/2017', periods=168, freq='H')
energysystem = EnergySystem(timeindex=timeindex)
Node.registry = energysystem
##########################################################################
# data
##########################################################################
# Read data file
full_filename = os.path.join(os.path.dirname(__file__),
'timeseries.csv')
timeseries = pd.read_csv(full_filename, sep=',')
##########################################################################
# Create oemof object
##########################################################################
bel = Bus(label='bel')
Sink(label='demand_el',
inputs={
bel: Flow(actual_value=timeseries['demand_el'],
fixed=True, nominal_value=100)})
Source(label='pp_wind',
outputs={
bel: Flow(nominal_value=40, fixed=True,
actual_value=timeseries['wind'])})
Source(label='pp_pv',
outputs={
bel: Flow(nominal_value=20, fixed=True,
actual_value=timeseries['pv'])})
Source(label='pp_gas',
outputs={
bel: Flow(nominal_value=50, nonconvex=NonConvex(),
variable_costs=60,
negative_gradient={'ub': 0.05, 'costs': 0},
positive_gradient={'ub': 0.05, 'costs': 0})})
Source(label='pp_bio',
outputs={
bel: Flow(nominal_value=5,
variable_costs=100)})
components.GenericStorage(
label='storage_el',
inputs={
bel: Flow()},
outputs={
bel: Flow()},
nominal_capacity=40,
nominal_input_capacity_ratio=1/10,
nominal_output_capacity_ratio=1/10,
)
# heat componentes
bth = Bus(label='bth')
bgas = Bus(label='bgas')
Source(label='gas',
outputs={
bgas: Flow()})
Sink(label='demand_th',
inputs={
bth: Flow(actual_value=timeseries['demand_th'],
fixed=True, nominal_value=100)})
Transformer(label='pth',
inputs={
bel: Flow()},
outputs={
bth: Flow(nominal_value=30)},
conversion_factors={bth: 0.99})
Transformer(label='chp',
inputs={
bgas: Flow(variable_costs=80)},
outputs={
bel: Flow(nominal_value=40),
bth: Flow()},
conversion_factors={bel: 0.4,
bth: 0.4})
Source(label='boiler_bio',
outputs={
bth: Flow(nominal_value=100,
variable_costs=60)})
components.GenericStorage(
label='storage_th',
inputs={
bth: Flow()},
outputs={
bth: Flow()},
nominal_capacity=30,
nominal_input_capacity_ratio=1/8,
nominal_output_capacity_ratio=1/8,
)
##########################################################################
# Create model and solve
##########################################################################
m = Model(energysystem)
# emission_limit(m, flows=m.flows, limit=954341)
# m.write('test_nc.lp', io_options={'symbolic_solver_labels': True})
m.solve(solver='cbc', solve_kwargs={'tee': True})
results = processing.results(m)
graph = create_graph(energysystem, m)
draw_graph(graph, plot=True, layout='neato', node_size=3000,
node_color={'bel': '#7EC0EE', 'bgas': '#eeac7e', 'bth': '#cd3333'})
| gpl-3.0 |
maheshakya/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows one to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
thilbern/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
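# Note: the generator above returns the same random matrix twice, once as a
# dense ndarray and once as CSR, so dense and sparse inputs can be compared
# on identical data.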
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
mahak/spark | python/pyspark/pandas/tests/test_ops_on_diff_frames.py | 14 | 74918 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.frame import DataFrame
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(PandasOnSparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@property
def psdf2(self):
return ps.from_pandas(self.pdf2)
@property
def psdf3(self):
return ps.from_pandas(self.pdf3)
@property
def psdf4(self):
return ps.from_pandas(self.pdf4)
@property
def psdf5(self):
return ps.from_pandas(self.pdf5)
@property
def psdf6(self):
return ps.from_pandas(self.pdf6)
@property
def psser1(self):
return ps.from_pandas(self.pser1)
@property
def psser2(self):
return ps.from_pandas(self.pser2)
@property
def psser3(self):
return ps.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ps.range(10) + ps.range(10)).sort_index(),
(
ps.DataFrame({"id": list(range(10))}) + ps.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ps.DataFrame({"a": [1, 2, 3]}).set_index("a") + ps.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((psdf1.a - psdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((psdf1.a * psdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
psdf1.columns = columns
psdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(psdf1[("x", "a")] - psdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(psdf1[("x", "a")] - psdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(psdf1["x"]["a"] - psdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((psser1 + psser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((psser1 - psser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((psser1 * psser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
psdf3 = ps.from_pandas(pdf3)
common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq(
(psdf1.a - psdf2.b - psdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index()
)
assert_eq(
(psdf1.a * (psdf2.a * psdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
psdf1.columns = columns
psdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
psdf3.columns = columns
pdf3.columns = columns
common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)
# Series
assert_eq(
(psdf1[("x", "a")] - psdf2[("x", "b")] - psdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(psdf1[("x", "a")] * (psdf2[("x", "b")] * psdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
psser3 = ps.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((psser1 + psser2 - psser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((psser1 * psser2 * psser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((psser1 - psser2 / psser3).sort_index(), expected)
else:
assert_eq((psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((psser1 + psser2 * psser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
psser = ps.from_pandas(pser)
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
psser = ps.from_pandas(pser)
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), psdf1[psdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), psdf1.A[psdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (psdf1.A + 1)[psdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), psdf1.loc[psdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), psdf1.A.loc[psdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (psdf1.A + 1).loc[psdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
assert_eq((psser1 | psser2).sort_index(), pser1 | pser2)
assert_eq((psser1 & psser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
# a pandas bug?
# assert_eq((psser1 | psser2).sort_index(), pser1 | pser2)
# assert_eq((psser1 & psser2).sort_index(), pser1 & pser2)
assert_eq(
(psser1 | psser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(psser1 & psser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
psdf3 = psdf1.copy()
psdf4 = psdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
psdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
psdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
psdf5 = ps.from_pandas(pdf5)
psdf6 = ps.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([psdf1, psdf2.C], [pdf1, pdf2.C]),
([psdf1.A, psdf2], [pdf1.A, pdf2]),
([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
([psdf3[("X", "A")], psdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([psdf3, psdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([psdf3[("X", "A")], psdf4], [pdf3[("X", "A")], pdf4]),
([psdf3, psdf4], [pdf3, pdf4]),
([psdf5, psdf6], [pdf5, pdf6]),
([psdf6, psdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (psdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ps.concat(psdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(
psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
TypeError, "`combine_first` only allows `Series` for parameter `other`"
):
psser1.combine_first(50)
psser1.name = ("X", "A")
psser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(
psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(
psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
psser1.name = ("X", "A")
psser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
psdf.insert(1, "y", psser)
pdf.insert(1, "y", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
psdf.insert(1, "y", psser)
pdf.insert(1, "y", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(0, "a", psser)
pdf.insert(0, "a", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf.insert(0, ("b", "c", ""), psser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(),
psser1.compare(psser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
psser1.compare(psser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
psser1.compare(psser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(),
psser1.compare(psser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
psser1.compare(psser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
psser1.compare(psser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
psser1 = ps.Series(["b", "c", np.nan, "g", np.nan])
psser2 = ps.Series(["a", "c", np.nan, np.nan, "h"])
expected = ps.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, psser1.compare(psser2).sort_index())
# `keep_shape=True`
expected = ps.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ps.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ps.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
psser1 = ps.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
psser2 = ps.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ps.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, psser1.compare(psser2).sort_index())
# `keep_shape=True`
expected = ps.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ps.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ps.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected,
psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
psser1 = ps.Series(
[1, 2, 3, 4, 5],
index=pd.Index([1, 2, 3, 4, 5]),
)
psser2 = ps.Series(
[2, 2, 3, 4, 1],
index=pd.Index([5, 4, 3, 2, 1]),
)
psser1.compare(psser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
psser1 = ps.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
psser2 = ps.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
psser1.compare(psser2)
def test_different_columns(self):
psdf1 = self.psdf1
psdf4 = self.psdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
psdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
psdf4.columns = columns
pdf4.columns = columns
self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psser = psdf.a
pser = pdf.a
psdf["a"] = self.psdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psser = psdf.a
pser = pdf.a
psdf["a"] = self.psdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf["c"] = self.psdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Multi-index columns
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
psdf.columns = columns
pdf.columns = columns
psdf[("y", "c")] = self.psdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(psdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
psdf = ps.from_pandas(pdf)
psdf.index.name = None
psdf["NEW"] = ps.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psser = psdf.a
pser = pdf.a
psdf[["a", "b"]] = self.psdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
# 'c' does not exist in `psdf`.
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psser = psdf.a
pser = pdf.a
psdf[["b", "c"]] = self.psdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
# 'c' and 'd' do not exist in `psdf`.
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf[["c", "d"]] = self.psdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf.columns = columns
pdf.columns = columns
psdf[[("y", "c"), ("z", "d")]] = self.psdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf1 = ps.from_pandas(self.pdf1)
pdf1 = self.pdf1
psdf1.columns = columns
pdf1.columns = columns
psdf[["c", "d"]] = psdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf["a"] = self.psdf1.a
pdf["a"] = self.pdf1.a
psdf["a"] = self.psdf2.b
pdf["a"] = self.pdf2.b
psdf["d"] = self.psdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf[["a", "b"]] = self.psdf1
pdf[["a", "b"]] = self.pdf1
psdf[["e", "f"]] = self.psdf3
pdf[["e", "f"]] = self.pdf3
psdf[["b", "c"]] = self.psdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
psdf5 = self.psdf5
psdf6 = self.psdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((psdf5.c - psdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((psdf5["c"] / psdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((psdf5 + psdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
psdf = ps.from_pandas(self.pdf5)
pdf = self.pdf5
psdf["x"] = self.psdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf = ps.from_pandas(self.pdf5)
pdf = self.pdf5
psdf["e"] = self.psdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf = ps.from_pandas(self.pdf5)
pdf = self.pdf5
psdf["c"] = self.psdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
psdf = ps.from_pandas(self.pdf5)
pdf = self.pdf5
psdf[["c"]] = self.psdf5
pdf[["c"]] = self.pdf5
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf = ps.from_pandas(self.pdf5)
pdf = self.pdf5
psdf[["x"]] = self.psdf5
pdf[["x"]] = self.pdf5
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf = ps.from_pandas(self.pdf6)
pdf = self.pdf6
psdf[["x", "y"]] = self.psdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf_orig = ps.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
psdf = psdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
psser1 = psdf.max_speed
psser2 = psdf.shield
another_psdf = ps.DataFrame(pdf_orig)
psdf.loc[["viper", "sidewinder"], ["shield"]] = -another_psdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(psdf, pdf)
self.assert_eq(psser1, pser1)
self.assert_eq(psser2, pser2)
pdf = pdf_orig.copy()
psdf = psdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
psser1 = psdf.max_speed
psser2 = psdf.shield
psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(psdf, pdf)
self.assert_eq(psser1, pser1)
self.assert_eq(psser2, pser2)
pdf = pdf_orig.copy()
psdf = psdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
psser1 = psdf.max_speed
psser2 = psdf.shield
psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(psdf, pdf)
self.assert_eq(psser1, pser1)
self.assert_eq(psser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.DataFrame(pdf)
another_psdf = ps.DataFrame(pdf)
psdf.iloc[[0, 1, 2], 1] = -another_psdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(psdf, pdf)
with self.assertRaisesRegex(
ValueError,
"shape mismatch",
):
psdf.iloc[[1, 2], [1]] = -another_psdf.max_speed
psdf.iloc[[0, 1, 2], 1] = 10 * another_psdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(psdf, pdf)
with self.assertRaisesRegex(ValueError, "shape mismatch"):
psdf.iloc[[0], 1] = 10 * another_psdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
psser.loc[psser % 2 == 1] = -psser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[["viper", "sidewinder"]] = -psser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
pser1 = pser + 1
psser1 = psser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
psser.iloc[[0, 1, 2]] = -psser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser.iloc[[1, 2]] = -psser_another
psser.iloc[[0, 1, 2]] = 10 * psser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser.iloc[[0]] = 10 * psser_another
psser1.iloc[[0, 1, 2]] = -psser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser1, pser1)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser1.iloc[[1, 2]] = -psser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
piloc = pser.iloc
kiloc = psser.iloc
kiloc[[0, 1, 2]] = -psser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
kiloc[[1, 2]] = -psser_another
kiloc[[0, 1, 2]] = 10 * psser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
kiloc[[0]] = 10 * psser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
pser.update(pd.Series([4, 5, 6]))
psser.update(ps.Series([4, 5, 6]))
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), psdf1.where(psdf2 < -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 > -250), psdf1.mask(psdf2 > -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
def test_multi_index_column_assignment_frame(self):
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
psdf = ps.DataFrame(pdf)
psdf["c"] = ps.Series([10, 20, 30, 20])
pdf["c"] = pd.Series([10, 20, 30, 20])
psdf[("d", "x")] = ps.Series([100, 200, 300, 200], name="1")
pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
psdf[("d", "y")] = ps.Series([1000, 2000, 3000, 2000], name=("1", "2"))
pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
psdf["e"] = ps.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
psdf[[("f", "x"), ("f", "y")]] = ps.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
self.assert_eq(repr(psdf.sort_index()), repr(pdf))
with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
psdf[("1", "2", "3")] = ps.Series([100, 200, 300, 200])
def test_series_dot(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
psser = ps.from_pandas(pser)
pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
psser_other = ps.Series([90, 91, 85], index=[1, 2, 4])
pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
# length of index is different
psser_other = ps.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
psser.dot(psser_other)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
pser = pd.Series([0, 1, 2, 3])
psser = ps.from_pandas(pser)
# DataFrame "other" without Index/MultiIndex as columns
pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
# DataFrame "other" with Index as columns
pdf.columns = pd.Index(["x", "y"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
pdf.columns = pd.Index(["x", "y"], name="cols_name")
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
pdf = pdf.reindex([1, 0, 2, 3])
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
# DataFrame "other" with MultiIndex as columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
pdf.columns = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y")], names=["cols_name1", "cols_name2"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
psser = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).b
pser = psser.to_pandas()
psdf = ps.DataFrame({"c": [7, 8, 9]})
pdf = psdf.to_pandas()
self.assert_eq(psser.dot(psdf), pser.dot(pdf))
def test_frame_dot(self):
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 1, 2, 1])
psser = ps.from_pandas(pser)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# Index reorder
pser = pser.reindex([1, 0, 2, 3])
psser = ps.from_pandas(pser)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# ser with name
pser.name = "ser"
psser = ps.from_pandas(pser)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# df with MultiIndex as column (ser with MultiIndex)
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pser = pd.Series([1, 1, 2, 1], index=pidx)
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
psdf = ps.from_pandas(pdf)
psser = ps.from_pandas(pser)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# df with Index as column (ser with Index)
pidx = pd.Index([1, 2, 3, 4], name="number")
pser = pd.Series([1, 1, 2, 1], index=pidx)
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
psdf = ps.from_pandas(pdf)
psser = ps.from_pandas(pser)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# df with Index
pdf.index = pd.Index(["x", "y"], name="char")
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
# df with MultiIndex
pdf.index = pd.MultiIndex.from_arrays([[1, 1], ["red", "blue"]], names=("number", "color"))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dot(psser), pdf.dot(pser))
pdf = pd.DataFrame([[1, 2], [3, 4]])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dot(psdf[0]), pdf.dot(pdf[0]))
self.assert_eq(psdf.dot(psdf[0] * 10), pdf.dot(pdf[0] * 10))
self.assert_eq((psdf + 1).dot(psdf[0] * 10), (pdf + 1).dot(pdf[0] * 10))
def test_to_series_comparison(self):
psidx1 = ps.Index([1, 2, 3, 4, 5])
psidx2 = ps.Index([1, 2, 3, 4, 5])
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
psidx1.name = "koalas"
psidx2.name = "koalas"
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
def test_series_repeat(self):
pser1 = pd.Series(["a", "b", "c"], name="a")
pser2 = pd.Series([10, 20, 30], name="rep")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(psser1.repeat(psser2).sort_index(), pser1.repeat(pser2).sort_index())
def test_series_ops(self):
pser1 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
pser2 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
pidx1 = pd.Index([10, 11, 12, 13, 14, 15, 16], name="x")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
psidx1 = ps.from_pandas(pidx1)
self.assert_eq(
(psser1 + 1 + 10 * psser2).sort_index(), (pser1 + 1 + 10 * pser2).sort_index()
)
self.assert_eq(
(psser1 + 1 + 10 * psser2.rename()).sort_index(),
(pser1 + 1 + 10 * pser2.rename()).sort_index(),
)
self.assert_eq(
(psser1.rename() + 1 + 10 * psser2).sort_index(),
(pser1.rename() + 1 + 10 * pser2).sort_index(),
)
self.assert_eq(
(psser1.rename() + 1 + 10 * psser2.rename()).sort_index(),
(pser1.rename() + 1 + 10 * pser2.rename()).sort_index(),
)
self.assert_eq(psser1 + 1 + 10 * psidx1, pser1 + 1 + 10 * pidx1)
self.assert_eq(psser1.rename() + 1 + 10 * psidx1, pser1.rename() + 1 + 10 * pidx1)
self.assert_eq(psser1 + 1 + 10 * psidx1.rename(None), pser1 + 1 + 10 * pidx1.rename(None))
self.assert_eq(
psser1.rename() + 1 + 10 * psidx1.rename(None),
pser1.rename() + 1 + 10 * pidx1.rename(None),
)
self.assert_eq(psidx1 + 1 + 10 * psser1, pidx1 + 1 + 10 * pser1)
self.assert_eq(psidx1 + 1 + 10 * psser1.rename(), pidx1 + 1 + 10 * pser1.rename())
self.assert_eq(psidx1.rename(None) + 1 + 10 * psser1, pidx1.rename(None) + 1 + 10 * pser1)
self.assert_eq(
psidx1.rename(None) + 1 + 10 * psser1.rename(),
pidx1.rename(None) + 1 + 10 * pser1.rename(),
)
pidx2 = pd.Index([11, 12, 13])
psidx2 = ps.from_pandas(pidx2)
with self.assertRaisesRegex(
ValueError, "operands could not be broadcast together with shapes"
):
psser1 + psidx2
with self.assertRaisesRegex(
ValueError, "operands could not be broadcast together with shapes"
):
psidx2 + psser1
def test_index_ops(self):
pidx1 = pd.Index([1, 2, 3, 4, 5], name="x")
pidx2 = pd.Index([6, 7, 8, 9, 10], name="x")
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
self.assert_eq(psidx1.rename(None) * 10 + psidx2, pidx1.rename(None) * 10 + pidx2)
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(psidx1 * 10 + psidx2.rename(None), pidx1 * 10 + pidx2.rename(None))
else:
self.assert_eq(
psidx1 * 10 + psidx2.rename(None), (pidx1 * 10 + pidx2.rename(None)).rename(None)
)
pidx3 = pd.Index([11, 12, 13])
psidx3 = ps.from_pandas(pidx3)
with self.assertRaisesRegex(
ValueError, "operands could not be broadcast together with shapes"
):
psidx1 + psidx3
pidx1 = pd.Index([1, 2, 3, 4, 5], name="a")
pidx2 = pd.Index([6, 7, 8, 9, 10], name="a")
pidx3 = pd.Index([11, 12, 13, 14, 15], name="x")
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(psidx1 * 10 + psidx3, pidx1 * 10 + pidx3)
else:
self.assert_eq(psidx1 * 10 + psidx3, (pidx1 * 10 + pidx3).rename(None))
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0]:
psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=axis)
pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=axis)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
pser1 = pd.Series([7, 8, 9], index=[10, 11, 12])
pser2 = pd.Series(["g", "h", "i"], index=[10, 20, 30])
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
for join in ["outer", "inner", "left", "right"]:
psser_l, psser_r = psser1.align(psser2, join=join)
pser_l, pser_r = pser1.align(pser2, join=join)
self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
psdf_l, psser_r = psdf1.align(psser1, join=join, axis=0)
pdf_l, pser_r = pdf1.align(pser1, join=join, axis=0)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
psser_l, psdf_r = psser1.align(psdf1, join=join)
pser_l, pdf_r = pser1.align(pdf1, join=join)
self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
# multi-index columns
pdf3 = pd.DataFrame(
{("x", "a"): [4, 5, 6], ("y", "c"): ["d", "e", "f"]}, index=[10, 11, 12]
)
psdf3 = ps.from_pandas(pdf3)
pser3 = pdf3[("y", "c")]
psser3 = psdf3[("y", "c")]
for join in ["outer", "inner", "left", "right"]:
psdf_l, psdf_r = psdf1.align(psdf3, join=join, axis=0)
pdf_l, pdf_r = pdf1.align(pdf3, join=join, axis=0)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
psser_l, psser_r = psser1.align(psser3, join=join)
pser_l, pser_r = pser1.align(pser3, join=join)
self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
psdf_l, psser_r = psdf1.align(psser3, join=join, axis=0)
pdf_l, pser_r = pdf1.align(pser3, join=join, axis=0)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
psser_l, psdf_r = psser3.align(psdf1, join=join)
pser_l, pdf_r = pser3.align(pdf1, join=join)
self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=None))
self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=1))
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
pser_other = pd.Series([np.nan, 2, 3])
psser_other = ps.from_pandas(pser_other)
self.assert_eq(pser.pow(pser_other), psser.pow(psser_other).sort_index())
self.assert_eq(pser ** pser_other, (psser ** psser_other).sort_index())
self.assert_eq(pser.rpow(pser_other), psser.rpow(psser_other).sort_index())
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.shift().loc[pdf["Col1"] == 20].astype(int), psdf.shift().loc[psdf["Col1"] == 20]
)
self.assert_eq(
pdf["Col2"].shift().loc[pdf["Col1"] == 20].astype(int),
psdf["Col2"].shift().loc[psdf["Col1"] == 20],
)
def test_diff(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.diff().loc[pdf["Col1"] == 20].astype(int), psdf.diff().loc[psdf["Col1"] == 20]
)
self.assert_eq(
pdf["Col2"].diff().loc[pdf["Col1"] == 20].astype(int),
psdf["Col2"].diff().loc[psdf["Col1"] == 20],
)
def test_rank(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().loc[pdf["Col1"] == 20], psdf.rank().loc[psdf["Col1"] == 20])
self.assert_eq(
pdf["Col2"].rank().loc[pdf["Col1"] == 20], psdf["Col2"].rank().loc[psdf["Col1"] == 20]
)
class OpsOnDiffFramesDisabledTest(PandasOnSparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", False)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@property
def psdf2(self):
return ps.from_pandas(self.pdf2)
def test_arithmetic(self):
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
self.psdf1.a - self.psdf2.b
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
self.psdf1.a - self.psdf2.a
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
self.psdf1["a"] - self.psdf2["a"]
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
self.psdf1 - self.psdf2
def test_assignment(self):
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf = ps.from_pandas(self.pdf1)
psdf["c"] = self.psdf1.a
def test_frame_loc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.DataFrame(pdf)
another_psdf = ps.DataFrame(pdf)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf.loc[["viper", "sidewinder"], ["shield"]] = another_psdf.max_speed
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.DataFrame(pdf)
another_psdf = ps.DataFrame(pdf)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf.iloc[[1, 2], [1]] = another_psdf.max_speed.iloc[[1, 2]]
def test_series_loc_setitem(self):
pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser = ps.from_pandas(pser)
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.loc[psser % 2 == 1] = -psser_another
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.loc[psser_another % 2 == 1] = -psser
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.loc[psser_another % 2 == 1] = -psser_another
def test_series_iloc_setitem(self):
pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser = ps.from_pandas(pser)
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.iloc[[1]] = -psser_another.iloc[[1]]
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.where(psdf2 > 100)
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.where(psdf2 < -250)
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.mask(psdf2 < 100)
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.mask(psdf2 > -250)
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.align(psdf2)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psdf1.align(psdf2, axis=0)
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
pser_other = pd.Series([np.nan, 2, 3])
psser_other = ps.from_pandas(pser_other)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.pow(psser_other)
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser ** psser_other
with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
psser.rpow(psser_other)
if __name__ == "__main__":
from pyspark.pandas.tests.test_ops_on_diff_frames import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
0x0all/scikit-learn | sklearn/datasets/svmlight_format.py | 6 | 14944 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f: {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features: int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
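# Hedged usage sketch (added for illustration; not part of the original
# module). It round-trips a tiny dense matrix through the dump/load functions
# defined in this file. The file path is a placeholder and nothing in the
# library calls this helper.
def _example_svmlight_roundtrip(path="/tmp/example.svmlight"):
    X = np.array([[0.0, 1.5], [2.0, 3.0]])
    y = np.array([1.0, -1.0])
    # each sample is written as one text line: "<target> <column>:<value> ..."
    dump_svmlight_file(X, y, path, zero_based=True)
    # load_svmlight_file returns a CSR sparse matrix and a 1-d target array
    X_loaded, y_loaded = load_svmlight_file(path, zero_based=True)
    return X_loaded.toarray(), y_loaded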
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
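# Illustrative note (added; not in the original module): the train/test
# consistency issue described in the Notes above is usually handled by loading
# both files in a single call, e.g.
#     X_train, y_train, X_test, y_test = load_svmlight_files(
#         ("svmlight_train.txt", "svmlight_test.txt"))
# so that X_train.shape[1] == X_test.shape[1]; the file names are placeholders.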
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
line_pattern = u("%d")
else:
line_pattern = u("%.16g")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, one_based, comment, query_id)
| bsd-3-clause |
sdiazb/airflow | airflow/hooks/hive_hooks.py | 7 | 28649 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
            zip(  # itertools.izip does not exist on Python 3; zip imported from builtins works on both
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
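    # Hedged usage sketch (added; not in the original source); the statement
    # and hiveconf settings are placeholders:
    #   hh = HiveCliHook()
    #   hh.run_cli("SELECT 1;", hive_conf={"hive.exec.dynamic.partition": "true"})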
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
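    # Hedged usage sketch (added; not in the original source); the table name
    # and DataFrame contents are placeholders:
    #   import pandas as pd
    #   hh = HiveCliHook()
    #   df = pd.DataFrame({"state": ["NY", "CA"], "num": [10, 20]})
    #   hh.load_df(df, table="tmp.babynames_stage", recreate=True)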
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
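    # Hedged illustration (added; not in the original source) of the HQL this
    # method emits for a partitioned load; table, column and partition names
    # are placeholders:
    #   CREATE TABLE IF NOT EXISTS tmp.babynames (name STRING, num BIGINT)
    #   PARTITIONED BY (ds STRING)
    #   ROW FORMAT DELIMITED
    #   FIELDS TERMINATED BY ','
    #   STORED AS textfile;
    #   LOAD DATA LOCAL INPATH '/tmp/babynames.csv' OVERWRITE INTO TABLE
    #   tmp.babynames PARTITION (ds='2015-01-01');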
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type schema: string
:partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type schema: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type schema: string
:partition: Name of the partitions to check for (eg `a=b/c=d`)
:type schema: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
        Get the metastore table objects for tables matching the pattern
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
        Get the names of metastore databases matching the pattern
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (java short max val).
        For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
        tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
            self.get_table(table_name, db)
            return True
        except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
Note that the default authMechanism is PLAIN, to override it you
    can specify it in the ``extra`` of your connection in the UI as in
    ``{"authMechanism": "GSSAPI"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
        # impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
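    # Hedged illustration (added; not in the original source): with Kerberos
    # security enabled, the connection "extra" read above might look like
    #   {"authMechanism": "GSSAPI", "kerberos_service_name": "hive"}
    # Both values are placeholders.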
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
dsullivan7/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
pwittrock/test-infra | hack/analyze-memory-profiles.py | 9 | 6137 | #!/usr/bin/env python3
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is meant to be used to analyze memory profiles created by the Prow binaries when
# the --profile-memory-usage flag is passed. The interval of profiling can be set with the
# --memory-profile-interval flag. This tool can also be used on the output of the sidecar utility
# when the sidecar.Options.WriteMemoryProfile option has been set. The tools will write sequential
# profiles into a directory, from which this script can load the data, create time series and
# visualize them.
import os
import pathlib
import subprocess
import sys
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.font_manager import FontProperties
if len(sys.argv) != 2:
print("[ERROR] Expected the directory containing profiles as the only argument.")
print("Usage: {} ./path/to/profiles/".format(sys.argv[0]))
sys.exit(1)
profile_dir = sys.argv[1]
def parse_bytes(value):
    # pprof prints either a raw number or a value with a unit suffix such as
    # 512.37kB, 64MB or 1.20GB; normalise everything to bytes.
    value = value.decode("utf-8")
    if not value.endswith("B"):
        return float(value)
    suffix = value[-2:]
    if suffix in ("kB", "KB"):
        multiple = 1024
    elif suffix == "MB":
        multiple = 1024 * 1024
    elif suffix == "GB":
        multiple = 1024 * 1024 * 1024
    else:
        # a bare "<number>B" value is already in bytes
        return float(value[:-1])
    return float(value[:-2]) * multiple
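# Illustrative expectations (added; not part of the original script), using
# values shaped like the pprof output parsed below:
#   parse_bytes(b"66.05MB") == 66.05 * 1024 * 1024
#   parse_bytes(b"1.20GB") == 1.20 * 1024 * 1024 * 1024
#   parse_bytes(b"1024") == 1024.0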
overall_name = "overall".encode("utf-8")
dates_by_name = {overall_name: []}
flat_usage_over_time = {overall_name: []}
cumulative_usage_over_time = {overall_name: []}
max_usage = 0
for subdir, dirs, files in os.walk(profile_dir):
for file in files:
full_path = os.path.join(subdir, file)
date = datetime.fromtimestamp(pathlib.Path(full_path).stat().st_mtime)
output = subprocess.run(
["go", "tool", "pprof", "-top", "-inuse_space", full_path],
check=True, stdout=subprocess.PIPE
)
# The output of go tool pprof will look like:
#
# File: sidecar
# Type: inuse_space
# Time: Mar 19, 2021 at 10:30am (PDT)
# Showing nodes accounting for 66.05MB, 100% of 66.05MB total
# flat flat% sum% cum cum%
# 64MB 96.90% 96.90% 64MB 96.90% google.golang.org/api/internal/gensupport...
#
# We want to parse all of the lines after the header and metadata.
lines = output.stdout.splitlines()
usage = parse_bytes(lines[3].split()[-2])
if usage > max_usage:
max_usage = usage
data_index = 0
for i in range(len(lines)):
if lines[i].split()[0].decode("utf-8") == "flat":
data_index = i + 1
break
flat_overall = 0
cumulative_overall = 0
for line in lines[data_index:]:
parts = line.split()
name = parts[5]
if name not in dates_by_name:
dates_by_name[name] = []
dates_by_name[name].append(date)
if name not in flat_usage_over_time:
flat_usage_over_time[name] = []
flat_usage = parse_bytes(parts[0])
flat_usage_over_time[name].append(flat_usage)
flat_overall += flat_usage
if name not in cumulative_usage_over_time:
cumulative_usage_over_time[name] = []
cumulative_usage = parse_bytes(parts[3])
cumulative_usage_over_time[name].append(cumulative_usage)
cumulative_overall += cumulative_usage
dates_by_name[overall_name].append(date)
flat_usage_over_time[overall_name].append(flat_overall)
cumulative_usage_over_time[overall_name].append(cumulative_overall)
plt.rcParams.update({'font.size': 22})
fig = plt.figure(figsize=(30, 18))
plt.subplots_adjust(right=0.7)
ax = plt.subplot(211)
for name in dates_by_name:
dates = mdates.date2num(dates_by_name[name])
values = flat_usage_over_time[name]
# we only want to show the top couple callsites, or our legend gets noisy
if max(values) > 0.01 * max_usage:
ax.plot_date(dates, values,
label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
linestyle='solid')
else:
ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
formatter = ticker.FuncFormatter(lambda y, pos: '{:,.0f}'.format(y / (1024 * 1024)) + 'MB')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Flat Space In Use (bytes)")
plt.title("Space In Use By Callsite")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
ax = plt.subplot(212)
for name in dates_by_name:
dates = mdates.date2num(dates_by_name[name])
values = cumulative_usage_over_time[name]
# we only want to show the top couple callsites, or our legend gets noisy
if max(values) > 0.01 * max_usage:
ax.plot_date(dates, values,
label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
linestyle='solid')
else:
ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Cumulative Space In Use (bytes)")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
plt.show()
| apache-2.0 |
cauchycui/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/datasets/species_distributions.py | 64 | 7917 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999  # remap the missing-data sentinel values
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
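# Hedged usage sketch (added; not part of the original module): the grids are
# normally built from the Bunch returned by the loader defined below, e.g.
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
# which yields longitude/latitude axes matching data.coverages.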
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| bsd-3-clause |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QAQAWEB.py | 2 | 1943 | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""QUANTAXIS访问获取QAWEB的行情
233333 结果变成自己访问自己了
"""
import pandas as pd
from QUANTAXIS.QAUtil.QACode import QA_util_code_tostr
def QA_fetch_get_stock_day(code, start, end, ip='192.168.0.1', port='8010'):
pass
# requests.get(
def QA_fetch_get_stock_block():
"""ths的版块数据
Returns:
[type] -- [description]
"""
url = 'http://data.yutiansut.com/self_block.csv'
try:
bl = pd.read_csv(url)
return bl.assign(code=bl['证券代码'].apply(QA_util_code_tostr), blockname=bl['行业'], name=bl['证券名称'], source='outside', type='outside').set_index('code', drop=False)
except Exception as e:
print(e)
return None
if __name__ == "__main__":
print(QA_fetch_get_stock_block()) | mit |
paulbnjl/PySerSpec | PySerSpec/data_proc.py | 1 | 13807 |
#-*- coding: UTF-8 -*
############################################################################
############################################################################
#### Person responsible for this pure evil spaghetti code : ####
#### Paul Bonijol -> paul [.] bnjl ['AT'] gmail [.] com ####
#### Works with python3.4 ####
#### ####
#### PySerSpec.py is free software: you can redistribute it and/or ####
#### modify it under the terms of the GNU General Public License as ####
#### published by the Free Software Foundation, either version 3 of the ####
#### License, or (at your option) any later version. ####
#### See http://www.gnu.org/licenses for more information. ####
#### ####
#### I hope this program will be useful to somebody else ! ####
#### But please keep in mind that it comes WITHOUT ANY WARRANTY ! ####
#### If something bad happens, well, sorry ! :( ####
########################### DATA PROCESSING CLASS ##########################
############################################################################
import matplotlib.pyplot as plot
import csv
import os
import datetime
class DataProcessing:
def __init__(self):
self.PROCESS_DATA = []
self.ABS_raw = []
self.ABS_corr = []
self.TIME = []
self.WV = []
def data_spectrum(self, DATA_output):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
WV_nested = [s.split(' ', 1)[:1] for s in self.PROCESS_DATA]
self.WV = [val for sublist in WV_nested for val in sublist if val != ""]
ABS_nested = [s.split(' ', 1)[1:] for s in self.PROCESS_DATA]
self.ABS_raw = [val for sublist in ABS_nested for val in sublist if val != ""]
self.ABS_corr = [val for val in self.ABS_raw]
for i in range(len(self.ABS_corr)):
if '-' in self.ABS_corr[i]:
self.ABS_corr[i] = '0'
else:
pass
return self.ABS_corr, self.ABS_raw, self.WV
def data_time_range1(self, DATA_output):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
TIME_nested = [s.split(' ', 2)[:2] for s in self.PROCESS_DATA]
self.TIME = [val for sublist in TIME_nested for val in sublist if val != ""]
self.TIME = [int(float(x))*100 for x in self.TIME]
timeval = 0
for val in self.TIME:
self.TIME[timeval] = timeval
timeval +=1
ABS_nested = [s.split(' ', 2)[2:] for s in self.PROCESS_DATA]
self.ABS_raw = [val for sublist in ABS_nested for val in sublist if val != ""]
self.ABS_corr = [val for val in self.ABS_raw]
for i in range(len(self.ABS_corr)):
if '-' in self.ABS_corr[i]:
self.ABS_corr[i] = '0'
else:
pass
return self.ABS_raw, self.ABS_corr, self.TIME
def data_time_range2(self, DATA_output):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
TIME_nested = [s.split(' ', 2)[:2] for s in self.PROCESS_DATA]
self.TIME = [val for sublist in TIME_nested for val in sublist if val != ""]
self.TIME = [int(float(x))*10 for x in self.TIME]
print(self.TIME) #
timeval = 0
for val in self.TIME:
self.TIME[timeval] = timeval
timeval +=1
ABS_nested = [s.split(' ', 2)[2:] for s in self.PROCESS_DATA]
self.ABS_raw = [val for sublist in ABS_nested for val in sublist if val != ""]
self.ABS_corr = [val for val in self.ABS_raw]
for i in range(len(self.ABS_corr)):
if '-' in self.ABS_corr[i]:
self.ABS_corr[i] = '0'
else:
pass
return self.ABS_raw, self.ABS_corr, self.TIME
def data_time_range3(self, DATA_output, TIME_VAL):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
#TIME_nested = [s.split(' ', 2)[:2] for s in self.PROCESS_DATA]
self.TIME.append(int(TIME_VAL)/50)
x = (int(TIME_VAL)/50)
while len(self.TIME) <= 49:
self.TIME.append(x + (int(TIME_VAL)/50))
x += (int(TIME_VAL)/50)
ABS_nested = [s.split(' ', 2)[2:] for s in self.PROCESS_DATA]
self.ABS_raw = [val for sublist in ABS_nested for val in sublist if val != ""]
self.ABS_corr = [val for val in self.ABS_raw]
for i in range(len(self.ABS_corr)):
if '-' in self.ABS_corr[i]:
self.ABS_corr[i] = '0'
else:
pass
return self.ABS_raw, self.ABS_corr, self.TIME
def data_time_range4(self, DATA_output, TIME_VAL):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
#TIME_nested = [s.split(' ', 2)[:2] for s in self.PROCESS_DATA]
self.TIME.append((int(TIME_VAL)/10))
x = (int(TIME_VAL)/10)
while len(self.TIME) <= 9:
self.TIME.append(x + (int(TIME_VAL)/10))
x += (int(TIME_VAL)/10)
ABS_nested = [s.split(' ', 2)[2:] for s in self.PROCESS_DATA]
self.ABS_raw = [val for sublist in ABS_nested for val in sublist if val != ""]
self.ABS_corr = [val for val in self.ABS_raw]
for i in range(len(self.ABS_corr)):
if '-' in self.ABS_corr[i]:
self.ABS_corr[i] = '0'
else:
pass
return self.ABS_raw, self.ABS_corr, self.TIME
def data_value(self, DATA_output):
for val in DATA_output:
self.PROCESS_DATA.append(val.decode('utf-8'))
count = 0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x00",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("b'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x05",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x04",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x1b",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("'",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x2c",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("\x06",'')
count += 1
count=0
for val in self.PROCESS_DATA:
self.PROCESS_DATA[count] = val.replace("d",'')
count += 1
self.ABS_raw = self.PROCESS_DATA[0]
self.ABS_corr = self.ABS_raw
for val in range(len(self.ABS_corr)):
if '-' in self.ABS_corr:
self.ABS_corr = '0'
else:
pass
return self.ABS_corr, self.ABS_raw
def data_plot(self, title_x_axis, title_y_axis,val_ax, val_or):
for i in range(len(val_ax)):
if val_ax[i] == '':
val_ax[i] = val_ax[i-1]
else:
pass
plot.plot(val_ax,val_or)
plot.title(title_y_axis + '=f(' + title_x_axis + ')')
plot.xlabel(title_x_axis)
plot.ylabel(title_y_axis)
plot.show()
def data_save_csv(self, name, title_column1, title_column2, wv_or_time_val, abs_or_tr_or_en_val, gain_set, light_set, mode_set):
if os.path.exists('./data') == False:
os.makedirs('data/')
else:
pass
os.chdir('data/')
if mode_set == '1':
data_mode = 'Absorbance'
elif mode_set == '2':
data_mode = 'Transmittance'
elif mode_set == '3':
data_mode = 'Energy'
else:
pass
if light_set == '1':
light = 'WI lamp'
elif light_set == '2':
light = 'D2 lamp'
elif light_set == '3':
light = 'Custom lamp'
else:
pass
print("Please type an identifier (first for raw data csv, then corrected data csv) : ")
sample_id = input()
filename = datetime.datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
data_file = open(sample_id + '_' +name + '_' + filename + '.csv', 'w', newline='')
writer = csv.writer(data_file)
writer.writerow((title_column1, title_column2))
for val1, val2 in zip(wv_or_time_val, abs_or_tr_or_en_val) :
writer.writerow((val1, val2))
writer.writerow(('####', '####'))
writer.writerow((' Gain : ', gain_set))
writer.writerow((' Light Source : ', light))
writer.writerow((' Mode : ', data_mode))
writer.writerow((' Time : ', datetime.datetime.now().strftime("%d/%m/%Y-%H:%M:%S")))
writer.writerow(('####', '####'))
data_file.close()
os.chdir('..')
def data_save_csv_mono(self, name, title_column1, title_column2, wv_or_time_val, abs_or_tr_or_en_val, gain_set, light_set, mode_set):
if os.path.exists('./data') == False:
os.makedirs('data/')
else:
pass
os.chdir('data/')
if mode_set == '1':
data_mode = 'Absorbance'
elif mode_set == '2':
data_mode = 'Transmittance'
elif mode_set == '3':
data_mode = 'Energy'
else:
pass
if light_set == '1':
light_source = 'WI lamp'
elif light_set == '2':
light_source = 'D2 lamp'
elif light_set == '3':
light_source = 'Custom lamp'
else:
pass
print("Please type an identifier (first for raw data csv, then corrected data csv) : ")
sample_id_mono = input()
filename = datetime.datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
data_file = open(sample_id_mono + '_' + name + '_' + filename + '.csv', 'w', newline='')
writer = csv.writer(data_file)
writer.writerow((title_column1, title_column2))
writer.writerow((int(wv_or_time_val)/10, abs_or_tr_or_en_val))
writer.writerow(('####', '####'))
writer.writerow((' Gain : ', gain_set))
writer.writerow((' Light Source : ', light_source))
writer.writerow((' Mode : ', data_mode))
writer.writerow((' Time : ', datetime.datetime.now().strftime("%d/%m/%Y-%H:%M:%S")))
writer.writerow(('####', '####'))
data_file.close()
os.chdir('..')
| gpl-3.0 |
wlamond/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
| bsd-3-clause |
bzero/statsmodels | statsmodels/datasets/fair/data.py | 25 | 3074 | #! /usr/bin/env python
"""Fair's Extramarital Affairs Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Included with permission of the author."""
TITLE = """Affairs dataset"""
SOURCE = """
Fair, Ray. 1978. "A Theory of Extramarital Affairs," `Journal of Political
Economy`, February, 45-61.
The data is available at http://fairmodel.econ.yale.edu/rayfair/pdf/2011b.htm
"""
DESCRSHORT = """Extramarital affair data."""
DESCRLONG = """Extramarital affair data used to explain the allocation
of an individual's time among work, time spent with a spouse, and time
spent with a paramour. The data is used as an example of regression
with censored data."""
#suggested notes
NOTE = """::
Number of observations: 6366
Number of variables: 9
Variable name definitions:
rate_marriage : How rate marriage, 1 = very poor, 2 = poor, 3 = fair,
4 = good, 5 = very good
age : Age
yrs_married : No. years married. Interval approximations. See
original paper for detailed explanation.
children : No. children
religious : How relgious, 1 = not, 2 = mildly, 3 = fairly,
4 = strongly
educ : Level of education, 9 = grade school, 12 = high
school, 14 = some college, 16 = college graduate,
17 = some graduate school, 20 = advanced degree
occupation : 1 = student, 2 = farming, agriculture; semi-skilled,
or unskilled worker; 3 = white-colloar; 4 = teacher
counselor social worker, nurse; artist, writers;
technician, skilled worker, 5 = managerial,
administrative, business, 6 = professional with
advanced degree
occupation_husb : Husband's occupation. Same as occupation.
affairs : measure of time spent in extramarital affairs
See the original paper for more details.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=8, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=8, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/fair.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
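# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# A minimal example of how these loaders are typically used. The Dataset attribute
# names (.data, .endog, .exog) follow the usual statsmodels datasets convention and
# are assumed here; running it requires fair.csv to sit next to this module.
def _example_usage():
    dataset = load_pandas()
    df = dataset.data          # full DataFrame with the 9 variables described above
    affairs = dataset.endog    # column 8: time spent in extramarital affairs
    predictors = dataset.exog  # the remaining explanatory variables
    return df.shape, affairs.mean(), predictors.columns.tolist()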
| bsd-3-clause |
all-umass/manifold_spanning_graphs | neighborhood.py | 1 | 1945 | import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
try:
from bottleneck import argpartsort
except ImportError:
try:
# Added in version 1.8, which is pretty new.
# Sadly, it's still slower than bottleneck's version.
argpartsort = np.argpartition
except AttributeError:
argpartsort = lambda arr,k: np.argsort(arr)
def min_k_indices(arr, k, inv_ind=False):
'''Returns indices of the k-smallest values in each row, unsorted.
The `inv_ind` flag returns the tuple (k-smallest,(n-k)-largest). '''
psorted = argpartsort(arr, k)
if inv_ind:
return psorted[...,:k], psorted[...,k:]
return psorted[...,:k]
def neighbor_graph(X, precomputed=False, k=None, epsilon=None, symmetrize=True, weighting='binary'):
'''Construct an adj matrix from a matrix of points (one per row).
When `precomputed` is True, X is a distance matrix.
`weighting` param can be one of {binary, none}.'''
assert ((k is not None) or (epsilon is not None)
), "Must provide `k` or `epsilon`"
assert weighting in ('binary','none'), "Invalid weighting param: "+weighting
num_pts = X.shape[0]
if precomputed:
dist = X.copy()
else:
dist = pairwise_distances(X, metric='sqeuclidean')
if k is not None:
k = min(k+1, num_pts)
nn,not_nn = min_k_indices(dist, k, inv_ind=True)
if epsilon is not None:
if k is not None:
dist[np.arange(dist.shape[0]), not_nn.T] = np.inf
in_ball = dist <= epsilon
dist[~in_ball] = 0 # zero out neighbors too far away
if symmetrize and k is not None:
# filtering may have caused asymmetry
dist = (dist + dist.T) / 2
else:
for i in xrange(num_pts):
dist[i,not_nn[i]] = 0 # zero out neighbors too far away
if symmetrize:
dist = (dist + dist.T) / 2
  if weighting == 'binary':
# cycle through boolean and back to get 1/0 in floating points
return dist.astype(bool).astype(float)
return dist
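# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# A minimal example of building a symmetric binary kNN adjacency matrix from random
# points; the data and the choice of k are made up purely for illustration.
def _example_neighbor_graph():
  rng = np.random.RandomState(0)
  X = rng.rand(20, 3)            # 20 points in 3-D
  W = neighbor_graph(X, k=4)     # 0/1 adjacency of the (approximate) 4-NN graph
  return W.shape, W.sum()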
| mit |
rhenley/pylibnidaqmx | nidaqmx/wxagg_plot.py | 16 | 4515 |
import os
import sys
import time
import traceback
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
import wx
from matplotlib.figure import Figure
class PlotFigure(wx.Frame):
def OnKeyPressed (self, event):
key = event.key
if key=='q':
self.OnClose(event)
def __init__(self, func, timer_period):
wx.Frame.__init__(self, None, -1, "Plot Figure")
self.fig = Figure((12,9), 75)
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.canvas.mpl_connect('key_press_event', self.OnKeyPressed)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
self.func = func
self.plot = None
self.timer_period = timer_period
self.timer = wx.Timer(self)
self.is_stopped = False
if os.name=='nt':
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(Size(fw, th))
# Create a figure manager to manage things
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, wx.GROW)
self.SetSizer(sizer)
self.Fit()
self.Bind(wx.EVT_TIMER, self.OnTimerWrap, self.timer)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.timer.Start(timer_period)
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def OnClose(self, event):
self.is_stopped = True
print 'Closing PlotFigure, please wait.'
self.timer.Stop()
self.Destroy()
def OnTimerWrap (self, evt):
if self.is_stopped:
print 'Ignoring timer callback'
return
t = time.time()
try:
self.OnTimer (evt)
except KeyboardInterrupt:
self.OnClose(evt)
duration = 1000*(time.time () - t)
if duration > self.timer_period:
print 'Changing timer_period from %s to %s msec' % (self.timer_period, 1.2*duration)
self.timer_period = 1.2*duration
self.timer.Stop()
self.timer.Start (self.timer_period)
def OnTimer(self, evt):
try:
xdata, ydata_list, legend = self.func()
except RuntimeError:
traceback.print_exc(file=sys.stderr)
self.OnClose(evt)
return
if len (ydata_list.shape)==1:
ydata_list = ydata_list.reshape((1, ydata_list.size))
if self.plot is None:
self.axes = self.fig.add_axes([0.1,0.1,0.8,0.8])
l = []
for ydata in ydata_list:
l.extend ([xdata, ydata])
self.plot = self.axes.plot(*l)
self.axes.set_xlabel('Seconds')
self.axes.set_ylabel('Volts')
self.axes.set_title('nof samples=%s' % (len(xdata)))
self.axes.legend (legend)
else:
self.axes.set_xlim(xmin = xdata[0], xmax=xdata[-1])
ymin, ymax = 1e9,-1e9
for line, data in zip (self.plot, ydata_list):
line.set_xdata(xdata)
line.set_ydata(data)
ymin, ymax = min (data.min (), ymin), max (data.max (), ymax)
dy = (ymax-ymin)/20
self.axes.set_ylim(ymin=ymin-dy, ymax=ymax+dy)
self.canvas.draw()
def onEraseBackground(self, evt):
# this is supposed to prevent redraw flicker on some X servers...
pass
def animated_plot(func, timer_period):
app = wx.PySimpleApp(clearSigInt=False)
frame = PlotFigure(func, timer_period)
frame.Show()
app.MainLoop()
if __name__ == '__main__':
from numpy import *
import time
start_time = time.time ()
def func():
x = arange (100, dtype=float)/100*pi
d = sin (x+(time.time ()-start_time))
return x, d, ['sin (x+time)']
try:
animated_plot (func, 1)
except Exception, msg:
print 'Got exception: %s' % ( msg)
else:
print 'Exited normally'
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 5 | 13165 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
robbymeals/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
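# --- Hypothetical usage sketch (illustration only, not part of the benchmark itself) ---
# A minimal example of factorising a small non-negative matrix with the Lee & Seung
# multiplicative updates implemented above; the matrix size, rank and seed are arbitrary.
def _example_alt_nnmf():
    rng = np.random.RandomState(0)
    V = np.abs(rng.standard_normal((30, 20)))   # small non-negative input matrix
    W, H = alt_nnmf(V, r=5, max_iter=200, tol=1e-3, R=rng)
    return norm(V - np.dot(W, H))               # Frobenius reconstruction error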
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
FCP-INDI/C-PAC | CPAC/func_preproc/utils.py | 1 | 6803 |
import numpy as np
from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz
from matplotlib import pyplot as plt
import nibabel as nb
import subprocess
import math
import os
def add_afni_prefix(tpattern):
if tpattern:
if ".txt" in tpattern:
tpattern = "@{0}".format(tpattern)
return tpattern
def nullify(value, function=None):
from traits.trait_base import Undefined
if value is None:
return Undefined
if function:
return function(value)
return value
def chunk_ts(func_file, n_cpus):
func_img = nb.load(func_file)
trs = func_img.shape[3]
chunk = trs/n_cpus
TR_ranges = []
for chunk_idx in range(0, n_cpus):
if chunk_idx == n_cpus - 1:
TR_ranges.append((int(chunk_idx*chunk), int(trs - 1)))
else:
TR_ranges.append((int(chunk_idx*chunk), int((chunk_idx+1)*chunk -1)))
return TR_ranges
def split_ts_chunks(func_file, tr_ranges):
if '.nii' in func_file:
ext = '.nii'
if '.nii.gz' in func_file:
ext = '.nii.gz'
split_funcs = []
for chunk_idx, tr_range in enumerate(tr_ranges):
out_file = os.path.join(os.getcwd(), os.path.basename(func_file).replace(ext, "_{0}{1}".format(chunk_idx, ext)))
in_file = "{0}[{1}..{2}]".format(func_file, tr_range[0], tr_range[1])
cmd = ["3dcalc", "-a", in_file, "-expr", "a", "-prefix", out_file]
retcode = subprocess.check_output(cmd)
split_funcs.append(out_file)
return split_funcs
def oned_text_concat(in_files):
out_file = os.path.join(os.getcwd(), os.path.basename(in_files[0].replace("_0", "")))
out_txt = []
for txt in in_files:
with open(txt, 'r') as f:
txt_lines = f.readlines()
if not out_txt:
out_txt = [x for x in txt_lines]
else:
for line in txt_lines:
if "#" in line:
continue
out_txt.append(line)
with open(out_file, 'wt') as f:
for line in out_txt:
f.write(line)
return out_file
def degrees_to_mm(degrees, head_radius):
# function to convert degrees of motion to mm
mm = 2*math.pi*head_radius*(degrees/360)
return mm
def mm_to_degrees(mm, head_radius):
# function to convert mm of motion to degrees
degrees = 360*mm/(2*math.pi*head_radius)
return degrees
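# Quick sanity check on the two conversions above (illustrative numbers only):
# 1 degree of rotation with head_radius = 50 mm is an arc of
# 2 * pi * 50 * (1 / 360), i.e. about 0.87 mm, and mm_to_degrees(0.87, 50)
# maps it back to roughly 1 degree.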
def notch_filter_motion(motion_params, filter_type, TR, fc_RR_min=None,
fc_RR_max=None, center_freq=None, freq_bw=None,
lowpass_cutoff=None, filter_order=4):
# Adapted from DCAN Labs:
# https://github.com/DCAN-Labs/dcan_bold_processing/blob/master/
# ...matlab_code/filtered_movement_regressors.m
if "ms" in TR:
TR = float(TR.replace("ms", ""))/1000
elif "ms" not in TR and "s" in TR:
TR = float(TR.replace("s", ""))
params_data = np.loadtxt(motion_params)
# Sampling frequency
fs = 1 / TR
# Nyquist frequency
fNy = fs / 2
if filter_type == "notch":
# Respiratory Rate
if fc_RR_min and fc_RR_max:
rr = [float(fc_RR_min) / float(60),
float(fc_RR_max) / float(60)]
rr_fNy = [rr[0] + fNy, rr[1] + fNy]
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif center_freq and freq_bw:
tail = float(freq_bw)/float(2)
fa = [center_freq-tail, center_freq+tail]
W_notch = np.divide(fa, fNy)
Wn = np.mean(W_notch)
bw = np.diff(W_notch)
# for filter info
center_freq = Wn * fNy
bandwidth = fa[1] - fa[0]
Q = Wn/bw
[b_filt, a_filt] = iirnotch(Wn, Q)
num_f_apply = np.floor(filter_order / 2)
filter_info = f"Motion estimate filter information\n\nType: Notch\n" \
f"\nCenter freq: {center_freq}\nBandwidth: {bandwidth}\n\n" \
f"Wn: {Wn}\nQ: {Q}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
elif filter_type == "lowpass":
if fc_RR_min:
rr = float(fc_RR_min) / float(60)
rr_fNy = rr + fNy
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif lowpass_cutoff:
fa = lowpass_cutoff
Wn = fa/fNy
if filter_order:
b_filt = firwin(filter_order+1, Wn)
a_filt = 1
num_f_apply = 0
filter_info = f"Motion estimate filter information\n\nType: Lowpass" \
f"\n\nCutoff freq: {fa}\nWn: {Wn}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
filter_design = os.path.join(os.getcwd(),
"motion_estimate_filter_design.txt")
filter_plot = os.path.join(os.getcwd(),
"motion_estimate_filter_freq-response.png")
# plot frequency response for user info
w, h = freqz(b_filt, a_filt, fs=fs)
fig, ax1 = plt.subplots()
ax1.set_title('Motion estimate filter frequency response')
ax1.plot(w, 20 * np.log10(abs(h)), 'b')
ax1.set_ylabel('Amplitude [dB]', color='b')
ax1.set_xlabel('Frequency [Hz]')
plt.savefig(filter_plot)
with open(filter_design, 'wt') as f:
f.write(filter_info)
# convert rotation params from degrees to mm
params_data[:, 0:3] = degrees_to_mm(params_data[:, 0:3], head_radius=50)
filtered_params = lfilter(b_filt, a_filt, params_data.T, zi=None)
for i in range(0, int(num_f_apply) - 1):
filtered_params = lfilter(b_filt, a_filt, filtered_params, zi=None)
# back rotation params to degrees
filtered_params[0:3,:] = mm_to_degrees(filtered_params[0:3,:], head_radius = 50)
filtered_motion_params = os.path.join(os.getcwd(),
"{0}_filtered.1D".format(os.path.basename(motion_params)))
np.savetxt(filtered_motion_params, filtered_params.T, fmt='%f')
return (filtered_motion_params, filter_design, filter_plot)
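# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# A minimal example of notch-filtering a 6-column motion parameter file around a
# typical adult respiratory band. The file name, TR and breathing rates below are
# made up; the function writes its filter-design text and frequency-response plot
# into the current working directory.
def _example_notch_filter():
    filtered_1d, design_txt, response_png = notch_filter_motion(
        "sub-01_motion.1D",   # hypothetical 6-parameter motion estimates file
        filter_type="notch",
        TR="0.8s",
        fc_RR_min=12,         # breaths per minute (illustrative)
        fc_RR_max=18,
        filter_order=4)
    return filtered_1d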
| bsd-3-clause |
aiguofer/bokeh | tests/compat/lc_offsets.py | 13 | 1127 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html", title="lc_offsets.py example")
show(mpl.to_bokeh())
| bsd-3-clause |
antgonza/qiita | qiita_db/support_files/patches/python_patches/74.py | 3 | 1281 | import pandas as pd
from os.path import getsize, join, dirname, abspath, exists
from qiita_db.util import get_filepath_information, compute_checksum
from qiita_db.sql_connection import TRN
with TRN:
sql = """SELECT filepath_id
FROM qiita.filepath"""
TRN.add(sql)
fids = TRN.execute_fetchflatten()
fpath = join(dirname(abspath(__file__)), 'support_files', 'patches',
'python_patches', '74.py.cache.tsv')
cache = dict()
if exists(fpath):
df = pd.read_csv(fpath, sep='\t', index_col=0, dtype=str,
names=['filepath_id', 'checksum', 'fp_size'])
cache = df.to_dict('index')
for fid in fids:
if fid not in cache:
finfo = get_filepath_information(fid)
try:
size = getsize(finfo['fullpath'])
except FileNotFoundError:
size = 0
try:
checksum = compute_checksum(finfo['fullpath'])
except FileNotFoundError:
checksum = ''
else:
checksum = cache[fid]['checksum']
size = cache[fid]['fp_size']
with TRN:
sql = """UPDATE qiita.filepath
SET fp_size = %s, checksum = %s
WHERE filepath_id = %s"""
TRN.add(sql, tuple([size, checksum, fid]))
TRN.execute()
| bsd-3-clause |
zahasoft/scraps | source/python/datascience/pandas-hypothesis.py | 1 | 3825 | # Copyright (c) 2018 Nikolay Zahariev <zahasoft.com>. Licensed under the MIT License
import pandas
import numpy
from scipy.stats import ttest_ind
pandas.options.display.max_rows = 100
def read_university_towns(filename):
towns = []
with open(filename) as fhandle:
current_state = ''
for line in fhandle:
if '[edit]' in line:
current_state = line.split('[')[0].strip()
else:
towns.append([current_state, line.split(' (')[0].strip()])
return towns
def get_list_of_university_towns():
return pandas.DataFrame(read_university_towns('university_towns.txt'), columns=['State', 'RegionName'])
def get_gdp():
return pandas.read_excel('gdplev.xls', usecols=[4, 6], names=['Quarter', 'GDP'], skiprows=219)
def get_gdp_diffs(gdp):
return gdp['GDP'].diff().values.tolist()
def get_recession_start_and_bottom():
gdp = get_gdp()
diffs = get_gdp_diffs(gdp)
current_index = 0
value = 0
start_index = None
is_in_recession = False
for i in range(1, len(diffs)):
if is_in_recession:
if diffs[i] > 0 and diffs[i + 1] > 0:
return (gdp.iloc[start_index]['Quarter'], gdp.iloc[current_index]['Quarter'])
else:
if value < diffs[i]:
current_index = i
value = diffs[i]
elif diffs[i] < 0 and diffs[i + 1] < 0:
is_in_recession = True
current_index = i
start_index = current_index
value = diffs[i]
def get_recession_start():
return get_recession_start_and_bottom()[0]
def get_recession_end():
gdp = get_gdp()
diffs = get_gdp_diffs(gdp)
is_in_recession = False
for i in range(1, len(diffs)):
if is_in_recession and diffs[i] > 0 and diffs[i + 1] > 0:
return gdp.iloc[i + 1]['Quarter']
elif diffs[i] < 0 and diffs[i + 1] < 0:
is_in_recession = True
def get_recession_bottom():
return get_recession_start_and_bottom()[1]
def convert_housing_data_to_quarters():
dataframe = pandas.read_csv('City_Zhvi_AllHomes.csv', usecols=[
1, 2] + [i for i in range(51, 251)])
dataframe['State'] = dataframe.apply(
lambda row: states[row['State']], axis=1)
dataframe['2016-09'] = numpy.nan
dataframe.set_index(['State', 'RegionName'], inplace=True)
dataframe.columns = pandas.DatetimeIndex(
dataframe[dataframe.columns]).to_period('M')
dataframe = dataframe.resample('Q', axis=1).mean()
dataframe.rename(columns=lambda x: str(x).lower(), inplace=True)
return dataframe
def run_ttest():
housing_prices = convert_housing_data_to_quarters()
recession = get_recession_start_and_bottom()
recession_start = recession[0]
recession_bottom = recession[1]
university_towns = get_list_of_university_towns()
previous_column = housing_prices.columns[housing_prices.columns.get_loc(
recession_start) - 1]
housing_prices['PriceRatio'] = housing_prices[previous_column].div(
housing_prices[recession_bottom])
towns = university_towns.to_records(index=False).tolist()
group1 = housing_prices.loc[towns]
group2 = housing_prices.loc[-housing_prices.index.isin(towns)]
dataframe = pandas.merge(housing_prices.reset_index(
), university_towns, on=university_towns.columns.tolist(), indicator='_flag', how='outer')
group1 = dataframe[dataframe['_flag'] == 'both']['PriceRatio'].tolist()
group2 = dataframe[dataframe['_flag'] != 'both']['PriceRatio'].tolist()
result = ttest_ind(group1, group2, nan_policy='omit')
better = 'university town'
if result.statistic < 0:
better = 'non-university town'
return (result.pvalue < 0.01, result.pvalue, better)
| mit |
cldershem/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
user_collection = database['user']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
dates_invited = [
user['date_registered']
for user in invited
]
if not dates_invited:
return
fig = plot_dates(dates_invited)
    plt.title('email invitations ({} total)'.format(len(dates_invited)))
plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
plt.close()
def analyze_email_confirmations():
confirmed = user_collection.find({
'unclaimed_records': {'$ne': {}},
'is_claimed': True,
})
dates_confirmed = [
user['date_confirmed']
for user in confirmed
]
if not dates_confirmed:
return
fig = plot_dates(dates_confirmed)
    plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
plt.close()
def main():
analyze_email_invites()
analyze_email_confirmations()
if __name__ == '__main__':
main()
| apache-2.0 |
nick-ng/TournamentPairer | tabulate.py | 16 | 39088 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or\
(isinstance(string, _binary_type) or isinstance(string, _text_type))\
and\
_isconvertible(inttype, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isint(string, _long_type):
return _long_type
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len(_strip_invisible(s))
else:
return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _long_type, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's a usual iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
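# Illustrative note (not part of the original source): for a conventional dict of
# iterables such as {"a": [1, 2], "b": [3, 4]} with headers="keys",
# _normalize_tabular_data returns rows [[1, 3], [2, 4]] (columns transposed into
# rows) and headers ['a', 'b'].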
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:f:",
["help", "header", "output", "sep=", "float=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = "g"
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt), file=file)
if __name__ == "__main__":
_main()
| mit |
has2k1/plydata | plydata/options.py | 1 | 2747 | """
PlyData Options
"""
# Names of all the options
OPTIONS = {'modify_input_data'}
#: For actions where it may be more efficient, if ``True``
#: the verb modifies the input data. This may be worth it
#: for very large datasets.
#:
#: Examples
#: --------
#: ::
#:
#:     import pandas as pd
#: from plydata.options import set_option
#:
#: df = pd.DataFrame({'x': [1, 2, 3]})
#:
#: df >> define(y='x+1')
#: 'y' in df # False
#:
#: set_option('modify_input_data', True)
#:
#: df >> define(y='x+1')
#: 'y' in df # True
modify_input_data = False
def get_option(name):
"""
Get plydata option
Parameters
----------
name : str
Name of the option
"""
if name not in OPTIONS:
raise ValueError("Unknown option {!r}".format(name))
return globals()[name]
def set_option(name, value):
"""
Set plydata option
Parameters
----------
name : str
Name of the option
value : object
New value of the option
Returns
-------
old : object
Old value of the option
See also
--------
:class:`options`
"""
old = get_option(name)
globals()[name] = value
return old
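# Illustrative sketch (not part of the original module):
#   old = set_option('modify_input_data', True)   # returns the previous value (False by default)
#   get_option('modify_input_data')               # -> True
#   set_option('modify_input_data', old)          # restore the previous setting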
class options:
"""
Options context manager
The code in the context is run with the specified options.
This is a convenient wrapper around :func:`set_option` to
handle setting and unsetting of option values.
Parameters
----------
kwargs : dict
``{option_name: option_value}`` pairs.
Examples
--------
>>> import pandas as pd
>>> from plydata import define
>>> from plydata.options import options
>>> df = pd.DataFrame({'x': [0, 1, 2, 3]})
With the default options
>>> df2 = df >> define(y='2*x')
>>> df2
x y
0 0 0
1 1 2
2 2 4
3 3 6
>>> df
x
0 0
1 1
2 2
3 3
Using the context manager
>>> with options(modify_input_data=True):
... df3 = df >> define(z='3*x')
>>> df3
x z
0 0 0
1 1 3
2 2 6
3 3 9
>>> df
x z
0 0 0
1 1 3
2 2 6
3 3 9
>>> df is df3
True
The default options apply again.
>>> df4 = df >> define(w='4*x')
>>> df
x z
0 0 0
1 1 3
2 2 6
3 3 9
>>> df is df4
False
"""
def __init__(self, **kwargs):
self.old = {}
self.new = kwargs
def __enter__(self):
for name, value in self.new.items():
self.old[name] = set_option(name, value)
def __exit__(self, exc_type, exc_value, traceback):
for name, value in self.old.items():
set_option(name, value)
| bsd-3-clause |
KarlTDebiec/Ramaplot | AnalyticalDataset.py | 1 | 8668 | # -*- coding: utf-8 -*-
# ramaplot.AnalyticalDataset.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages analytical Ramachandran plot datasets.
.. todo:
- Use parmed? Should be able to support many force fields
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
################################### CLASSES ###################################
class AnalyticalDataset(object):
"""
Manages analytical Ramachandran plot datasets.
Vn
------- * (1 + cos(periodicity * x + phase))
divider
"""
@staticmethod
def get_cache_key(infile, phi=None, psi=None, *args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
"""
from os.path import expandvars
return (AnalyticalDataset, expandvars(infile),
AnalyticalDataset.process_term_arg(phi),
AnalyticalDataset.process_term_arg(psi))
@staticmethod
def get_cache_message(cache_key):
"""
Generates message to be used when reloading previously-loaded
dataset.
Arguments:
cache_key (tuple): key with which dataset object is stored
in dataset cache
Returns:
cache_message (str): message to be used when reloading
previously-loaded dataset
"""
return "previously loaded from '{0}'".format(cache_key[1])
@staticmethod
def load_parm(infile, dataset_cache=None, verbose=1, **kwargs):
"""
"""
from .AmberForceField import AmberForceField
if "dataset_cache" is not None:
cache_key = AmberForceField.get_cache_key(parm=infile, **kwargs)
if cache_key in dataset_cache:
if verbose >= 1:
print(AmberForceField.get_cache_message(cache_key))
return dataset_cache[cache_key]
else:
if verbose >= 1:
print("loading from '{0}'".format(infile))
dataset_cache[cache_key] = AmberForceField(parm=infile,
verbose=verbose-1, **kwargs)
return dataset_cache[cache_key]
else:
if verbose >= 1:
print("loading from '{0}'".format(infile))
return AmberForceField(parm=infile, verbose=verbose-1, **kwargs)
@staticmethod
def process_term_arg(terms=None):
"""
Processes torsion term arguments
Arguments:
terms (str, list): torsion term(s) to be loaded from parm
file
Returns:
out_terms (tuple): processed terms
"""
import six
out_terms = []
# May be "C -N -CX-C"
if terms is None:
pass
elif isinstance(terms, six.string_types):
out_terms.append([terms, 0.0])
elif isinstance(terms, list):
# May be ["C -N -CX-C "]
if (len(terms) == 1
and isinstance(terms[0], six.string_types)):
out_terms.append([terms[0], 0.0])
# May be ["C -N -CX-C ", 120]
elif (len(terms) == 2
and isinstance(terms[0], six.string_types)
and (isinstance(terms[1], float)
or isinstance(terms[1], int))):
out_terms.append(terms)
# May be [["C -N -CX-C ", 120], "C -TN-CX -C "]
else:
for in_term in terms:
if isinstance(in_term, six.string_types):
out_terms.append([in_term, 0.0])
elif isinstance(in_term, list):
if (len(in_term) == 1
and isinstance(in_term[0], six.string_types)):
out_terms.append([in_term[0], 0.0])
elif (len(in_term) == 2
and isinstance(in_term[0], six.string_types)
and (isinstance(in_term[1], float)
or isinstance(in_term[1], int))):
out_terms.append(in_term)
else:
                            raise ValueError(
                                "Invalid torsion term specification: {0!r}".format(in_term))
else:
                        raise ValueError(
                            "Invalid torsion term specification: {0!r}".format(in_term))
else:
        raise TypeError(
            "Unsupported type for torsion term argument: {0!r}".format(terms))
return tuple(tuple(x) for x in out_terms)
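    # Illustrative examples (not part of the original source):
    #   process_term_arg("C -N -CX-C ")          -> (("C -N -CX-C ", 0.0),)
    #   process_term_arg([["C -N -CX-C ", 120]]) -> (("C -N -CX-C ", 120),)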
def __init__(self, infile, verbose=1, **kwargs):
"""
Initializes dataset.
Arguments:
infile (str): Path to Amber parm text file, may contain
environment variables
verbose (int): Level of verbose output
"""
from os.path import expandvars
from warnings import warn
import six
import pandas as pd
import numpy as np
from .AmberForceField import AmberForceField
# Load or reload data
infile = expandvars(infile)
ff = self.load_parm(infile, verbose=verbose, **kwargs)
# Initialize
torsions = ff.parameters["dihedrals"]
dist = np.zeros((360, 360))
grid = np.linspace(-180, 180, 360)
# Apply torsion terms
for dim in ["phi", "psi"]:
terms = AnalyticalDataset.process_term_arg(kwargs.get(dim))
# Apply each term in selected dimension
for term in terms:
# Load offset if provided
term, offset = term
type_1, type_2, type_3, type_4 = [t.strip()
for t in term.split("-")]
dim_torsions = torsions[(torsions["type_1"] == type_1) &
(torsions["type_2"] == type_2) &
(torsions["type_3"] == type_3) &
(torsions["type_4"] == type_4)]
if dim_torsions.size == 0:
dim_torsions = torsions[(torsions["type_4"] == type_1) &
(torsions["type_3"] == type_2) &
(torsions["type_2"] == type_3) &
(torsions["type_1"] == type_4)]
if dim_torsions.size == 0:
raise Exception(
"Term '{0:2}-{1:2}-{2:2}-{3:2}' ".format(
type_1, type_2, type_3, type_4) +
"not present in '{0}'".format(expandvars(infile)))
elif verbose >= 2:
warn("Term '{0:2}-{1:2}-{2:2}-{3:2}' ".format(
type_1, type_2, type_3, type_4) +
"not present in '{0}';".format(expandvars(infile)) +
"Term '{0:2}-{1:2}-{2:2}-{3:2}' ".format(
type_4, type_3, type_2, type_1) +
"is present and will be used")
if verbose >= 2:
print(dim_torsions[["type_1", "type_2", "type_3", "type_4",
"divider", "barrier", "phase", "periodicity"]])
for index, torsion in dim_torsions.iterrows():
divider = float(torsion["divider"])
barrier = float(torsion["barrier"])
phase = float(torsion["phase"])
periodicity = float(torsion["periodicity"])
torsion_pe = barrier / divider * (1 +
np.cos(
np.deg2rad(
np.abs(periodicity) * grid +
phase + offset)))
if dim == "phi":
dist += torsion_pe[:,np.newaxis]
else:
dist += torsion_pe
dist -= np.min(dist)
# Organize data
self.x_centers = grid
self.y_centers = grid
self.dist = dist
self.x_width = np.mean(grid[1:] - grid[:-1])
self.y_width = np.mean(grid[1:] - grid[:-1])
self.x_bins = np.linspace(grid[0] - self.x_width / 2,
grid[-1] + self.x_width / 2,
grid.size + 1)
self.y_bins = np.linspace(grid[0] - self.y_width / 2,
grid[-1] + self.y_width / 2,
grid.size + 1)
| bsd-3-clause |
pgaines937/news_articles | news_articles/data/post_process_data.py | 1 | 5756 | #!/usr/bin/env python3
#
# Post Processor for Google Finance Spider scraped data
# Name: Patrick Gaines
#
import fileinput
from pymongo import MongoClient
import pandas as pd
import json
import csv
import time
MONGODB_URI = 'mongodb://localhost:27017'
MONGODB_DATABASE = 'scrapy'
ARTICLES_COLLECTION = 'articles'
ARTICLES_FLATTENED_COLLECTION = 'articles_flattened'
STOCK_COLLECTION = 'stock_prices'
ARTICLES_DATA = 'articles.json'
ARTICLES_CSV = 'articles.csv'
STOCK_DATA = 'NASDAQ_GOOG.json'
STOCK_CSV = 'NASDAQ_GOOG.csv'
FINAL_DATASET = 'dataset.csv'
def convert_json_to_csv(articles_json, articles_csv, dataset_csv):
try:
item_list = []
page = open(articles_json, "r", encoding="utf8")
json_str = page.read()
print(json_str)
data_list = list(json_str.split('\n'))
print(data_list)
for item in data_list:
if item:
parsed_json = json.loads(item)
print(parsed_json)
item_list.append(parsed_json)
item_dict = item_list.pop(0)
item_dict.pop('_id')
item_dict.pop('url')
item_dict.pop('headline_text')
#item_dict.pop('article_text')
for item2 in item_list:
item2.pop('_id')
item2.pop('url')
item2.pop('headline_text')
#item2.pop('article_text')
for key, values in item2.items():
for value in values:
item_dict[key].append(value)
print(list(item_dict.keys()))
print(list(item_dict.values()))
flat_list_of_dicts = []
max_index = len(item_dict.keys()) - 1
while list(item_dict.values())[max_index]:
#record = { "url" : None, "headline_text" : None, "publish_date" : None, "sentiment_subjectivity" : None, "sentiment_polarity" : None, "article_text" : None }
record = { "publish_date" : None, "sentiment_subjectivity" : None, "sentiment_polarity" : None }
for key, value in item_dict.items():
record[key] = value.pop()
flat_list_of_dicts.append(record)
print(flat_list_of_dicts)
with open(articles_csv, 'w+', encoding="utf8") as f: # Just use 'w' mode in 3.x
w = csv.writer(f)
flat_list_of_dicts[0]['Date'] = 'None'
w.writerow(flat_list_of_dicts[0].keys())
for item3 in flat_list_of_dicts:
if item3['publish_date']:
publish_date = item3['publish_date']['$date']
date_list = list(publish_date.split('T'))
item3['Date'] = date_list[0]
w.writerow(item3.values())
a = pd.read_csv(articles_csv)
b = pd.read_csv('NASDAQ_GOOG.csv')
b = b.dropna(axis=1)
merged = a.merge(b, on='Date')
merged.to_csv(dataset_csv, index=False)
except Exception as e:
print("Error: " + str(e))
def flatten_articles():
"""Flattens the nested articles into a dict"""
try:
article_data = {}
for articles in database.articles.find():
for key, value in articles.items():
print(key, value)
if not key in article_data:
print("Creating list")
article_data[key] = []
print("Appending list")
article_data[key] += value
print(article_data)
except Exception as e:
print("Error: " + str(e))
if __name__ == '__main__':
try:
# Getting Connection from MongoDB
conn = MongoClient(MONGODB_URI)
# Connecting to MongoDB
print("Connecting to database in MongoDB named as " + MONGODB_DATABASE)
database = conn[MONGODB_DATABASE]
# Creating a collection named articles in MongoDB
print("Creating a collection in " + MONGODB_DATABASE + " named as " + ARTICLES_COLLECTION)
articles_collection = database[ARTICLES_COLLECTION]
# Creating a collection named articles_flattened in MongoDB
print("Creating a collection in " + MONGODB_DATABASE + " named as " + ARTICLES_FLATTENED_COLLECTION)
articles_flattened_collection = database[ARTICLES_FLATTENED_COLLECTION]
# Creating a collection named stock_prices in MongoDB
print("Creating a collection in " + MONGODB_DATABASE + " named as " + STOCK_COLLECTION)
stock_collection = database[STOCK_COLLECTION]
# Loading stock data from a json file to MongoDB
#print("Loading NASDAQ_GOOG.json file into the " + QUANDL_DATA + " present inside the database " + MONGODB_DATABASE)
#loadJsonIntoDB("NASDAQ_GOOG.json", collection)
print("Concatenating articles")
filenames = ['articles2.json', 'articles3.json', 'articles4.json']
with open('combined_articles.json', 'w', encoding='utf8') as fout, fileinput.input(filenames, openhook=fileinput.hook_encoded("utf-8")) as fin:
for line in fin:
fout.write(line)
time.sleep(3)
# Converting articles to csv and cross joining on stock data
print("Converting articles to csv and cross joining on stock data")
convert_json_to_csv('combined_articles.json', 'combined_articles_verbose.csv', 'combined_dataset_verbose.csv')
# Loading stock data from a json file to MongoDB
#print("Loading " + STOCK_DATA + " file in the " + STOCK_COLLECTION + " present inside the database " + MONGODB_DATABASE)
#convert_json_to_csv(STOCK_DATA, stock_collection)
# Flatten the articles collection
#print("Flattening the articles collection")
#flatten_articles()
except Exception as detail:
print("Error ==> ", detail)
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/excel/test_xlsxwriter.py | 2 | 1980 | import warnings
import pytest
from pandas import DataFrame
from pandas.util.testing import ensure_clean
from pandas.io.excel import ExcelWriter
xlsxwriter = pytest.importorskip("xlsxwriter")
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
def test_column_format(ext):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
openpyxl = pytest.importorskip("openpyxl")
with ensure_clean(ext) as path:
frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = "#,##0"
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({"num_format": num_format})
write_worksheet.set_column("B:B", None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
try:
read_worksheet = read_workbook["Sheet1"]
except TypeError:
# compat
read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1")
# Get the number format from the cell.
try:
cell = read_worksheet["B2"]
except TypeError:
# compat
cell = read_worksheet.cell("B2")
try:
read_num_format = cell.number_format
except Exception:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
def test_write_append_mode_raises(ext):
msg = "Append mode is not supported with xlsxwriter!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine="xlsxwriter", mode="a")
| apache-2.0 |
del680202/MachineLearning-memo | src/lr/logistic-regression.py | 1 | 1660 | #!/usr/bin/env python
# encoding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import math
# Dataset found online; it can be separated linearly
dataset = np.array([
((1, -0.4, 0.3), 0),
((1, -0.3, -0.1), 0),
((1, -0.2, 0.4), 0),
((1, -0.1, 0.1), 0),
((1, 0.6, -0.5), 0), #non-linear point
((1, 0.8, 0.7), 1),
((1, 0.9, -0.5), 1),
((1, 0.7, -0.9), 1),
((1, 0.8, 0.2), 1),
((1, 0.4, -0.6), 1)])
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def gradient(dataset, w):
g = np.zeros(len(w))
for x,y in dataset:
x = np.array(x)
error = sigmoid(w.T.dot(x))
g += (error - y) * x
return g / len(dataset)
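# Note (added comment): gradient() implements the batch logistic-regression gradient
#   (1/N) * sum_i (sigmoid(w.T x_i) - y_i) * x_i
# while cost() below uses the summed absolute error |y - sigmoid(w.T x)| rather than
# the usual cross-entropy loss.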
def cost(dataset, w):
total_cost = 0
for x,y in dataset:
x = np.array(x)
error = sigmoid(w.T.dot(x))
total_cost += abs(y - error)
return total_cost
def logistic(dataset):
w = np.zeros(3)
limit = 10
eta = 1
costs = []
for i in range(limit):
current_cost = cost(dataset, w)
print "current_cost=",current_cost
costs.append(current_cost)
w = w - eta * gradient(dataset, w)
eta *= 0.95
plt.plot(range(limit), costs)
plt.show()
return w
# Run the training
w = logistic(dataset)
# Plot the result
ps = [v[0] for v in dataset]
fig = plt.figure()
ax1 = fig.add_subplot(111)
#dataset前半後半已經分割好 直接畫就是
ax1.scatter([v[1] for v in ps[:5]], [v[2] for v in ps[:5]], s=10, c='b', marker="o", label='O')
ax1.scatter([v[1] for v in ps[5:]], [v[2] for v in ps[5:]], s=10, c='r', marker="x", label='X')
l = np.linspace(-2,2)
a,b = -w[1]/w[2], -w[0]/w[2]
ax1.plot(l, a*l + b, 'b-')
plt.legend(loc='upper left');
plt.show()
| apache-2.0 |
agartland/utils | dfprint.py | 1 | 4104 | import time
import subprocess
import os.path as op
import pandas as pd
import os
from functools import partial
__all__ = ['toPNG', 'toPDF']
def toPNG(df, outFn, dpi=200, **kwargs):
assert outFn[-4:] == '.png'
folder, fn = op.split(outFn)
pdfFn = outFn.replace('.png', '.pdf')
toPDF(df, pdfFn, **kwargs)
cmd = ['convert',# '-interaction=nonstopmode',
'-density %d' % dpi,
'-alpha off',
pdfFn,
'PNG32:%s' % outFn]
#print ' '.join(cmd)
if 'hideConsole' in kwargs and kwargs['hideConsole']:
try:
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
#si.wShowWindow = subprocess.SW_HIDE # default
except:
si = None
else:
si = None
devnull = open(os.devnull, 'w')
subprocess.check_call(' '.join(cmd),
shell=True,
startupinfo=si,
stdout=devnull,
stderr=devnull)
devnull.close()
removeAuxFiles(outFn)
def toPDF(df,
outFn,
titStr='',
float_format='%1.3g',
index=False,
hideConsole=True,
landscape=True,
legal=False,
margin=1):
if landscape:
orientation = 'landscape'
else:
orientation = 'portrait'
if not legal:
paper = 'letterpaper'
else:
paper = 'legalpaper'
folder, fn = op.split(outFn)
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
def repChar(s, c1, c2):
if not isinstance(s, str):
return s
else:
return s.replace(c1, c2)
if not df.empty:
for func in [partial(repChar, c1='_', c2='-'),
partial(repChar, c1='%', c2='\\%')]:
df = df.applymap(func)
df = df.rename(func, axis=0)
df = df.rename(func, axis=1)
texFn = outFn[:-3] + 'tex'
header = ['\\documentclass[10pt]{article}',
'\\usepackage{lmodern}',
'\\usepackage{booktabs}',
'\\usepackage{longtable}',
'\\usepackage{geometry}',
'\\usepackage[english]{babel}',
'\\usepackage[utf8]{inputenc}',
'\\usepackage{fancyhdr}',
'\\geometry{%s, %s, margin=%1.1fin}' % (paper, orientation, margin),
'\\pagestyle{fancy}',
'\\fancyhf{}',
'\\rhead{%s}' % time.ctime(),
'\\chead{%s}' % titStr,
'\\rfoot{Page \\thepage}',
'\\renewcommand{\\familydefault}{\\sfdefault}'
'\\begin{document}']
#'\\section*{%s}' % titStr]
footer = ['\\end{document}']
with open(texFn, 'w', encoding='utf-8') as fh:
for h in header:
fh.write(h + '\n')
sout = df.to_latex(float_format=lambda f: float_format % f,
longtable=True, index=index, escape=False)
fh.write(sout)
for f in footer:
fh.write(f + '\n')
cmd = ['latex',
'-output-format=pdf',
'-output-directory=%s' % folder,
texFn]
if hideConsole:
try:
"""This won't work in linux"""
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# si.wShowWindow = subprocess.SW_HIDE # default
except:
si = None
cmd.insert(2, '-interaction=nonstopmode')
else:
si = None
devnull = open(os.devnull, 'w')
for i in range(2):
"""Run latex twice to get the layout correct"""
subprocess.call(cmd,
startupinfo=si,
stdout=devnull,
stderr=devnull)
devnull.close()
removeAuxFiles(outFn)
def removeAuxFiles(outFn):
extensions = ['aux', 'log', 'tex']
for ext in extensions:
fn = outFn[:-3] + ext
try:
os.remove(fn)
except OSError:
pass
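# Illustrative usage sketch (not part of the original module; paths are hypothetical,
# and an external LaTeX install plus ImageMagick's `convert` are required):
#   import pandas as pd
#   df = pd.DataFrame({'group': ['a', 'b'], 'value': [1.5, 2.25]})
#   toPDF(df, '/tmp/table.pdf', titStr='Example table')
#   toPNG(df, '/tmp/table.png', dpi=150)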
| mit |
montilab/Hydra | hydra_pkg/bamqc.py | 2 | 28571 | #Copyright 2015 Daniel Gusenleitner, Stefano Monti
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Bamqc module
This module contains functions for running the custom bamqc script as
well as multiple reporting and plotting functions to add the QC into the
report html
"""
import os
import subprocess
from hydra_pkg import helper as HELPER
from hydra_pkg import module_helper as MODULE_HELPER
import numpy as np
import matplotlib.pyplot as plt
import pylab
def copy_files(param):
"""Copies all relevant bamqc files from the results directory into the report directory
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#if there is no bamqc directory in the report make one
param['bamqc_dir'] = param['working_dir'] + 'report/bamqc/'
if not os.path.exists(param['bamqc_dir']):
os.makedirs(param['bamqc_dir'])
#get the files that are actually in the output directory
call = ['ls', param['working_dir']+'results/bamqc/']
output, _ = subprocess.Popen(call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
present_files = [line for line in output.split('\n') if line != '']
#use only the stubs that are actually present
temp = []
for bqc_stub in param['stub']:
if bqc_stub in present_files:
temp.append(bqc_stub)
param['bamqc_stub'] = temp
#copy the unpacked directories
for stub in param['bamqc_stub']:
call = ['cp', '-R']
call.append(param['working_dir'] + 'results/bamqc/' + stub)
call.append(param['bamqc_dir'])
output, _ = subprocess.Popen(call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
def create_overview_table(param):
"""Function that creates an html overview table continaing the most important bamQC statistics
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#create a table
table = []
table.append([stub for stub in param['bamqc_stub']])
num_s = len(param['bamqc_stub'])
#link to summary files
temp = ['Summary files']
for stub in param['bamqc_stub']:
temp.append(['<a href="' + stub + '/output.txt">raw</a>'])
table.append(temp)
#link to overview files
temp = ['Full report']
for stub in param['bamqc_stub']:
temp.append('<a href="' +
stub +
'/sample_stats.html"><img src="../Icons/fastqc_icon.png"></a></td>')
table.append(temp)
#percent aligned makes only sense if we actually ran an alignment
if param['aligner'] != 'skip':
#header
table.append(['Percentages based on total number of reads:'])
#percent aligned
table.append(['Percent aligned'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['single_count_alignments'],
number2=param['num_total_reads'],
ntotal=num_s))
#percent uniquely aligned
table.append(['Percent uniquely aligned'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['unique_aligned_reads'],
number2=param['num_total_reads'],
ntotal=num_s))
#header
table.append(['Percentages based on total number of alignments:'])
#percent single reads
table.append(['Percent single end reads'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['is_singleton'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
#percent paired reads
table.append(['Percent paired end reads'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['is_paired'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
#percent proper paired reads
table.append(['Percent proper paired reads'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['is_proper_pair'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
#percent spliced reads
table.append(['Percent spliced reads'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['spliced_reads'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
#percent insert reads
table.append(['Percent of reads with inserts'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['reads_with_inserts'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
#percent deletion reads
table.append(['Percent of reads with deletions'] +
MODULE_HELPER.get_percentage(number1=param['bam_qc']['reads_with_deletions'],
number2=param['bam_qc']['total_aligned_reads'],
ntotal=num_s))
HELPER.write_html_table(param,
table,
out=param['bamqc_report'],
cell_width=65)
def read_raw_bamqc(param):
"""Reads the raw output from the bamqc run of a single bamqc run
:Parameter param: - dictionary that contains all general RNASeq pipeline parameters
"""
summary_files = []
for idx in range(len(param['bamqc_stub'])):
summary_files.append(param['bamqc_dir'] +
param['bamqc_stub'][idx] +
'/output.txt')
bamqc = dict()
#add entries into bamqc dictionary
filehandle = open(summary_files[0])
name_list = []
for name in filehandle.readlines():
bamqc[name.split('\t')[0].strip()] = []
name_list.append(name.split('\t')[0].strip())
filehandle.close()
#fill bamqc dictionary
for sum_file in summary_files:
filehandle = open(sum_file)
for name in filehandle.readlines():
bamqc[name.split('\t')[0].strip()].append(\
float(name.split('\t')[1].rstrip()))
filehandle.close()
#get the actual counts
param['bam_qc'] = bamqc
param['bam_qc']['single_count_alignments'] = \
[sum([v[i] for k, v in bamqc.items() \
if 'num_multiread' in k]) \
for i in range(len(bamqc['num_multiread 1']))]
param['bam_qc']['num_total_reads'] = param['num_total_reads']
key_list = ['num_total_reads', 'single_count_alignments']
key_list.extend(name_list)
filehandle = open(param['bamqc_dir'] + 'overview.txt', 'w')
filehandle.write(' \t' + '\t'.join(param['bamqc_stub']) + '\n')
for nam in key_list:
par = param['bam_qc'][nam]
if type(par) is float:
par = [par]
filehandle.write(nam +
'\t' +
'\t'.join([str(vv) for vv in par])+'\n')
filehandle.close()
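# Illustrative sketch of the per-sample output.txt format parsed above: tab-separated
# "<metric name>\t<numeric value>" lines, e.g. (values hypothetical)
#   unique_aligned_reads<TAB>1234567
#   num_multiread 1<TAB>987654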
def get_bar_width(fig_width, param):
#calculates the bar plot width
bar_width = 0.8
if len(param['bamqc_stub']) > 20:
bar_width = fig_width / float(len(param['bamqc_stub'])) * 10
return bar_width
def plot_alignments(param):
"""Creates a plot that contains the statistic on the number of aligned reads
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#values:
unique = param['bam_qc']['unique_aligned_reads'][:]
if type(param['bam_qc']['single_count_alignments']) is not list:
param['bam_qc']['single_count_alignments'] = [param['bam_qc']['single_count_alignments']]
aligned = param['bam_qc']['single_count_alignments'][:]
total = param['bam_qc']['num_total_reads'][:]
total = [(total[i]-aligned[i]) for i in range(len(param['bamqc_stub']))]
aligned = [(aligned[i]-unique[i]) for i in range(len(param['bamqc_stub']))]
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
rects1 = plt.bar(index,
total,
bar_width,
bottom=param['bam_qc']['single_count_alignments'],
alpha=opacity,
color='b')
rects2 = plt.bar(index,
aligned,
bar_width,
bottom=unique,
alpha=opacity,
color='r')
rects3 = plt.bar(index,
unique,
bar_width,
alpha=opacity,
color='g')
plt.xlabel('Samples')
plt.ylabel('Total (aligned) reads')
plt.title('Number of reads across samples')
ticks = param['bamqc_stub']
plt.legend((rects1[0],
rects2[0],
rects3[0]),
('Total reads', 'Aligned reads', 'Uniquely aligned'),
loc='lower left')
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/aligned_reads.png'
pylab.savefig(param['working_dir'] + filename)
param['bamqc_report'].write('<img src="aligned_reads.png"' +
' alt="number of aligned reads"><br><br>\n')
def plot_spliced_reads(param):
"""Creates a plot that contains the statistic on the number of spliced reads
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
percent = [0.0]*len(param['bamqc_stub'])
for idx in range(len(param['bamqc_stub'])):
percent[idx] = round(float(param['bam_qc']['spliced_reads'][idx]) /
float(param['bam_qc']['single_count_alignments'][idx])
* 100, 3)
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
_ = plt.bar(index, percent, bar_width, alpha=opacity, color='b')
plt.xlabel('Samples')
plt.ylabel('Percentage of spliced reads of all aligned reads')
plt.title('Percentage of spliced reads across samples')
ticks = param['bamqc_stub']
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/spliced_reads.png'
pylab.savefig(param['working_dir'] + filename)
param['bamqc_report'].write('<img src="spliced_reads.png" ' +
'alt="number of spliced reads"><br><br>\n')
def plot_insert_reads(param):
"""Creates a plot that contains the statistic on the number of reads with inserts
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
percent = [0.0]*len(param['bamqc_stub'])
for idx in range(len(param['bamqc_stub'])):
percent[idx] = round(float(param['bam_qc']['reads_with_inserts'][idx]) /
float(param['bam_qc']['single_count_alignments'][idx])
* 100, 3)
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
_ = plt.bar(index, percent, bar_width, alpha=opacity, color='b')
plt.xlabel('Samples')
    plt.ylabel('Percent of reads with inserts of all aligned reads')
plt.title('Percent of reads with inserts across samples')
ticks = param['bamqc_stub']
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/insert_reads.png'
pylab.savefig(param['working_dir']+filename)
param['bamqc_report'].write('<img src="insert_reads.png" '+
'alt="number of inserted reads"><br><br>\n')
def plot_delete_reads(param):
"""Creates a plot that contains the statistic on the number of reads that contain deletions
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
percent = [0.0] * len(param['bamqc_stub'])
for idx in range(len(param['bamqc_stub'])):
percent[idx] = round(float(param['bam_qc']['reads_with_deletions'][idx]) /
float(param['bam_qc']['single_count_alignments'][idx])
* 100, 3)
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
_ = plt.bar(index, percent, bar_width, alpha=opacity, color='b')
plt.xlabel('Samples')
plt.ylabel('Percent of reads with deletions of all aligned reads')
plt.title('Percent of reads with deletion across samples')
ticks = param['bamqc_stub']
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/delete_reads.png'
pylab.savefig(param['working_dir']+filename)
param['bamqc_report'].write('<img src="delete_reads.png"' +
' alt="number of deleted reads"><br><br>\n')
def plot_paired_singleton(param):
"""Creates a plot that contains the statistics on the number of paired,
proper paired and singleton reads
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#values:
single = param['bam_qc']['is_singleton'][:]
paired = param['bam_qc']['is_paired'][:]
proper = param['bam_qc']['is_proper_pair'][:]
paired_bottom = [(single[i] + proper[i]) for i in range(len(param['bamqc_stub']))]
paired = [paired[i] - proper[i] for i in range(len(param['bamqc_stub']))]
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
rects1 = plt.bar(index,
paired,
bar_width,
bottom=paired_bottom,
alpha=opacity,
color='b')
rects2 = plt.bar(index,
proper,
bar_width,
bottom=single,
alpha=opacity,
color='r')
rects3 = plt.bar(index, single, bar_width, alpha=opacity, color='g')
plt.xlabel('Samples')
plt.ylabel('Number of single / paired / proper paired reads')
plt.title('Number of reads by type')
ticks = param['bamqc_stub']
plt.legend((rects1[0],
rects2[0],
rects3[0]),
('Paired end reads',
'Proper paired reads',
'Single end reads'),
loc='lower left')
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/paired_reads.png'
pylab.savefig(param['working_dir']+filename)
param['bamqc_report'].write('<img src="paired_reads.png"' +
' alt="number of paired reads"><br><br>\n')
def plot_mismatches(param):
"""Creates a plot that split the reads by number of mismatches
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#values:
mm0 = []
for i in range(len(param['bamqc_stub'])):
mm0.append(param['bam_qc']['num_unique_mismatches 0'][i]+
param['bam_qc']['num_multiple_mismatches 0'][i])
mm1 = []
for i in range(len(param['bamqc_stub'])):
mm1.append(param['bam_qc']['num_unique_mismatches 1'][i]+
param['bam_qc']['num_multiple_mismatches 1'][i])
mm2 = []
for i in range(len(param['bamqc_stub'])):
mm2.append(param['bam_qc']['num_unique_mismatches 2'][i]+
param['bam_qc']['num_multiple_mismatches 2'][i])
mm3 = []
for i in range(len(param['bamqc_stub'])):
mm3.append(param['bam_qc']['num_unique_mismatches 3'][i]+
param['bam_qc']['num_multiple_mismatches 3'][i])
mm4 = []
for i in range(len(param['bamqc_stub'])):
mm4.append(param['bam_qc']['num_unique_mismatches 4'][i]+
param['bam_qc']['num_multiple_mismatches 4'][i])
#make it cummulative
bot_mm2 = [mm1[i]+mm0[i] for i in range(len(param['bamqc_stub']))]
bot_mm3 = [bot_mm2[i]+mm2[i] for i in range(len(param['bamqc_stub']))]
bot_mm4 = [bot_mm3[i]+mm3[i] for i in range(len(param['bamqc_stub']))]
#create plot
fig, _ = plt.subplots()
fig_width = min (MODULE_HELPER.get_max_image_width(), 5 + len(param['bamqc_stub']) * 0.4)
fig.set_size_inches(fig_width, 8)
index = np.arange(len(param['bamqc_stub']))
bar_width = get_bar_width(fig_width, param)
opacity = 0.4
rects1 = plt.bar(index, mm0, bar_width,
alpha=opacity, color='b')
rects2 = plt.bar(index, mm1, bar_width,
bottom=mm0, alpha=opacity, color='r')
rects3 = plt.bar(index, mm2, bar_width,
bottom=bot_mm2, alpha=opacity, color='g')
rects4 = plt.bar(index, mm3, bar_width,
bottom=bot_mm3, alpha=opacity, color='#555555')
rects5 = plt.bar(index, mm4, bar_width, bottom=bot_mm4,
alpha=opacity, color='#ffff00')
plt.xlabel('Samples')
plt.ylabel('Mismatches')
plt.title('Number of mismatches across samples')
ticks = param['bamqc_stub']
plt.legend((rects1[0],
rects2[0],
rects3[0],
rects4[0],
rects5[0]),
('Perfect match',
'1 mismatch',
'2 mismatches',
'3 mismatches',
'4+ mismatches'),
loc='lower left')
if fig_width != MODULE_HELPER.get_max_image_width():
plt.xticks(index + bar_width / 2, ticks, rotation='vertical')
plt.tight_layout()
#put it into the report
filename = 'report/bamqc/mismatches.png'
pylab.savefig(param['working_dir']+filename)
param['bamqc_report'].write('<img src="mismatches.png" ' +
'alt="number of mismatches"><br><br>\n')
def plot_overview_alignments(param):
#create a list with all relevant values
overview = []
num_s = len(param['bamqc_stub'])
#bring the data into required shape
overview.append(MODULE_HELPER.divide(param['bam_qc']['unique_aligned_reads'],
param['num_total_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['single_count_alignments'],
param['num_total_reads'],
num_s))
#make the first plot out of the first 2:
fig, ax = plt.subplots()
fig.set_size_inches(9, 1.5)
bp = ax.boxplot(overview, patch_artist=True, vert=False)
#change coloring
for box in bp['boxes']:
box.set( color='#7570b3', linewidth=2)
box.set( facecolor = '#999999' )
#change caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
#change outliers
for flier in bp['fliers']:
flier.set(marker='o', color='#ff0000', alpha=0.5)
ax.set_yticklabels(['Percent uniquely aligned',
'Percent aligned'])
ax.set_xlim(-5,105)
#put it into the report
filename = 'report/bamqc/overview_alignment.png'
fig.savefig(param['working_dir']+filename,
bbox_inches='tight')
param['report'].write('Percentages based on number of reads<br>')
param['report'].write('<img src="bamqc/overview_alignment.png" ' +
'alt="overview"><br><br>\n')
def plot_overview(param):
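    """Creates a boxplot summarizing, per sample, the percentage of alignments
    that carry deletions or inserts, are spliced, or fall into the different
    pairing categories
    :Parameter param: dictionary that contains all general RNASeq pipeline parameters
    """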
#create a list with all relevant values
overview = []
num_s = len(param['bamqc_stub'])
#bring the data into required shape
overview.append(MODULE_HELPER.divide(param['bam_qc']['reads_with_deletions'],
param['bam_qc']['total_aligned_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['reads_with_inserts'],
param['bam_qc']['total_aligned_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['spliced_reads'],
param['bam_qc']['total_aligned_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['is_proper_pair'],
param['bam_qc']['total_aligned_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['is_paired'],
param['bam_qc']['total_aligned_reads'],
num_s))
overview.append(MODULE_HELPER.divide(param['bam_qc']['is_singleton'],
param['bam_qc']['total_aligned_reads'],
num_s))
#make the first plot out of the first 2:
fig, ax = plt.subplots()
fig.set_size_inches(9, len(overview) / 2 + 0.5)
bp = ax.boxplot(overview, patch_artist=True, vert=False)
#change coloring
for box in bp['boxes']:
box.set( color='#7570b3', linewidth=2)
box.set( facecolor = '#999999' )
#change caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
#change outliers
for flier in bp['fliers']:
flier.set(marker='o', color='#ff0000', alpha=0.5)
    ax.set_yticklabels(['Percent of reads with deletions',
                        'Percent of reads with inserts',
                        'Percent spliced reads',
                        'Percent proper paired reads',
                        'Percent paired end reads',
                        'Percent single end reads'])
ax.set_xlim(-5,105)
#put it into the report
filename = 'report/bamqc/overview.png'
fig.savefig(param['working_dir']+filename,
bbox_inches='tight')
param['report'].write('Percentages based on number of alignments <br>')
param['report'].write('<img src="bamqc/overview.png" ' +
'alt="overview"><br><br>\n')
def plot_total_number_reads(param):
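    """Creates a boxplot comparing the total number of alignments against the
    total number of reads across all samples
    :Parameter param: dictionary that contains all general RNASeq pipeline parameters
    """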
#total number of aligned reads
overview = [param['bam_qc']['total_aligned_reads'],
param['num_total_reads']]
#make the first plot out of the first 2:
fig, ax = plt.subplots()
fig.set_size_inches(9, 1.5)
bp = ax.boxplot(overview, patch_artist=True, vert=False)
#change coloring
for box in bp['boxes']:
box.set( color='#7570b3', linewidth=2)
box.set( facecolor = '#999999' )
#change caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
#change outliers
for flier in bp['fliers']:
flier.set(marker='o', color='#ff0000', alpha=0.5)
ax.set_yticklabels(['Number of alignments','Number of reads'])
    ax.set_xlim(-5, max(max(overview[0]), max(overview[1])) * 1.05)
#put it into the report
filename = param['working_dir']+'report/bamqc/total_reads.png'
fig.savefig(filename, bbox_inches='tight')
param['report'].write('<img src="bamqc/total_reads.png" ' +
'alt="overview"><br><br>\n')
def report(param):
"""This function creates a full html report for the bamQC and also
copies all relevant files
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
param['report'].write('<center><br><br><h2>Bam QC results</h2>')
copy_files(param)
if len(param['bamqc_stub']) > 0:
read_raw_bamqc(param)
#create a separate bamqc report html
param['bamqc_report'] = open(param['working_dir']+'report/bamqc/bamqc.html', 'w')
param['bamqc_report'].write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '+
'Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1'+
'-strict.dtd"><head><title></title></head><body>\n')
param['bamqc_report'].write('<center><h1>Bam QC Report</h1></center>')
param['bamqc_report'].write('<a href="overview.txt">' +
'QC results as tab delimited file</a><br><br><br>')
create_overview_table(param)
plot_alignments(param)
plot_mismatches(param)
plot_paired_singleton(param)
plot_spliced_reads(param)
plot_insert_reads(param)
plot_delete_reads(param)
HELPER.report_finish(param['bamqc_report'])
#add the bamqc html to the report
param['report'].write('<a href="bamqc/bamqc.html">Full report</a><br>')
#Overview plots in the main report
plot_total_number_reads(param)
plot_overview_alignments(param)
plot_overview(param)
else:
param['report'].write('There were no results to show.')
def init(param):
"""Initialization function, that checks if the bamqc_script that is run
on every single samples is available
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
MODULE_HELPER.check_parameter(param, key='bamqc_script', dtype=str)
def main():
"""Main function that is run on each samples, which in turn calls the
actual bamqc running script to extract the QC statistics
"""
import sys
param = MODULE_HELPER.initialize_module()
#run create output directory
outdir = param['module_dir']+param['outstub']+'/'
if not os.path.exists(outdir):
os.makedirs(outdir)
call = [param['bamqc_script'],
'-i', param['working_file'],
'-o', outdir]
param['file_handle'].write('CALL: '+' '.join(call)+'\n')
output, error = subprocess.Popen(call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
param['file_handle'].write(error)
param['file_handle'].write(output)
    #bail out if the expected output file does not exist
if not os.path.exists(outdir+'stats.json'):
param['file_handle'].write('QC did not finish correctly..')
sys.exit()
MODULE_HELPER.wrapup_module(param)
| apache-2.0 |
imito/odin | odin/ml/plda.py | 1 | 14749 | # -*- coding: utf-8 -*-
""""
author: 'Omid Sadjadi, Timothee Kheyrkhah'
email: '[email protected]'
"""
import time
import warnings
from numbers import Number
import numpy as np
from scipy.linalg import cholesky, eigh, inv, solve, svd
from six import string_types
from odin.backend import calc_white_mat, length_norm
from odin.ml.base import BaseEstimator, Evaluable, TransformerMixin
from odin.ml.scoring import (VectorNormalizer, compute_class_avg,
compute_within_cov)
from odin.utils import unique
def logdet(A):
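  """Log-determinant of a symmetric positive-definite matrix via its Cholesky factor."""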
u = cholesky(A)
y = 2 * np.log(np.diag(u)).sum()
return y
class PLDA(BaseEstimator, TransformerMixin, Evaluable):
""" Probabilistic LDA
Parameters
----------
n_phi : int
number of dimension for the latent space
centering : bool (default: True)
mean normalization the data before EM
wccn : bool (default: True)
within class covariance normalization before EM
unit_length : bool (default: True)
normalize vector length of each sample to 1 before EM
n_iter : {integer, 'auto'}
if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)
compared to the `improve_threshold`
improve_threshold : scalar
Only used in case `n_iter='auto'`
labels : {list of string, or None} (default: None)
labels information for `evaluate` method
seed : int
random seed for reproducibility
verbose : int (default: 0)
verbose level, 0 for turning off all logging activities,
1 for basics notification, 2 for fitting progress.
if `2`, compute log-likelihood during fitting EM, this will
significantly slows down the process, only suggested for debugging
Attributes
----------
Sigma_ : [feat_dim, feat_dim]
Phi_ : [feat_dim, n_phi]
Sb_ : [feat_dim, feat_dim]
St_ : [feat_dim, feat_dim]
Lambda : []
Uk : []
Q_hat : []
X_model_ : [num_class, feat_dim]
class-dependence feature vectors
"""
def __init__(self, n_phi=None,
centering=True, wccn=True, unit_length=True,
n_iter='auto', improve_threshold=1e-1,
labels=None, dtype='float64', random_state=None,
verbose=0):
super(PLDA, self).__init__()
# ====== check n_phi ====== #
if n_phi is not None:
n_phi = int(n_phi)
self.n_phi_ = n_phi
# ====== check num_iter ====== #
if isinstance(n_iter, string_types):
n_iter = n_iter.lower()
assert n_iter == 'auto', 'Invalid `n_iter` value: %s' % n_iter
elif isinstance(n_iter, Number):
assert n_iter > 0, "`n_iter` must greater than 0, but given: %d" % n_iter
self.n_iter_ = n_iter
self.improve_threshold_ = float(improve_threshold)
# ====== other ====== #
self.feat_dim_ = None
self._labels = labels
self.verbose_ = int(verbose)
# for normalization
self._normalizer = VectorNormalizer(
centering=centering, wccn=wccn, unit_length=unit_length,
lda=False, concat=False)
self._dtype = np.dtype(dtype)
# ====== check random state ====== #
if random_state is None:
self._rand_state = np.random.RandomState(None)
elif isinstance(random_state, Number):
self._rand_state = np.random.RandomState(seed=random_state)
elif isinstance(random_state, np.random.RandomState):
self._rand_state = random_state
else:
raise ValueError("Invalid argument for `random_state`: %s" % str(random_state))
# Attributes
self.Sigma_ = None
self.Phi_ = None
self.Sb_ = None
self.St_ = None
# ==================== properties ==================== #
@property
def dtype(self):
return self._dtype
@property
def feat_dim(self):
return self.feat_dim_
@property
def normalizer(self):
return self._normalizer
@property
def labels(self):
return self._labels
@property
def num_classes(self):
return len(self._labels)
@property
def is_fitted(self):
if not hasattr(self, 'Lambda_') or \
not hasattr(self, 'Uk_') or \
not hasattr(self, 'Q_hat_') or \
not hasattr(self, 'X_model_'):
return False
return True
# ==================== Pickling ==================== #
def __getstate__(self):
if not self.is_fitted:
raise RuntimeError("The PLDA have not been fitted, nothing to pickle!")
return (self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,
self._normalizer, self._dtype, self._rand_state,
self.Sigma_, self.Phi_, self.Sb_, self.St_,
self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_)
def __setstate__(self, states):
(self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,
self._normalizer, self._dtype, self._rand_state,
self.Sigma_, self.Phi_, self.Sb_, self.St_,
self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_) = states
# ==================== helpers ==================== #
def initialize(self, X, labels):
feat_dim = X.shape[1]
    if self.feat_dim is None:
self.feat_dim_ = int(feat_dim)
if self._labels is None:
self._labels = labels
if self.feat_dim <= self.n_phi_:
raise RuntimeError("`feat_dim=%d` must be greater than `n_phi=%d`" %
(self.feat_dim, self.n_phi_))
# ====== initialize ====== #
# covariance matrix of the residual term
# self.Sigma_ = 1. / self.feat_dim * np.eye(self.feat_dim, dtype=self.dtype)
self.Sigma_ = (1. / self.feat_dim * np.eye(self.feat_dim) +
self._rand_state.randn(self.feat_dim, self.feat_dim)
).astype(self.dtype)
# self.Sigma_ = np.cov(X.T).astype(self.dtype)
# self.Sigma_ = (np.cov(X.T) +
# self._rand_state.randn(self.feat_dim, self.feat_dim)
# ).astype(self.dtype)
# self.Sigma_ = 100 * self._rand_state.randn(
# self.feat_dim, self.feat_dim).astype(self.dtype)
    # factor loading matrix (Eigenvoice matrix) [feat_dim, n_phi]
# self.Phi_ = np.r_[np.eye(self.n_phi_),
# np.zeros((self.feat_dim - self.n_phi_, self.n_phi_))]
# self.Phi_ = self._rand_state.randn(self.feat_dim, self.n_phi_).astype(self.dtype)
self.Phi_ = self.normalizer.transform(
self._rand_state.randn(self.n_phi_, self.feat_dim)
).T.astype(self.dtype)
self.Sb_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)
self.St_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)
# ====== validate the dimension ====== #
if self.feat_dim != feat_dim:
raise ValueError("Mismatch the input feature dimension, %d != %d" %
(self.feat_dim, feat_dim))
if self.num_classes != len(labels):
raise ValueError("Mismatch the number of output classes, %d != %d" %
(self.num_classes, len(labels)))
# ==================== sklearn ==================== #
def _update_caches(self):
# ====== update cached matrices for scoring ====== #
iSt = inv(self.St_) # [feat_dim, feat_dim]
iS = inv(self.St_ - np.dot(np.dot(self.Sb_, iSt), self.Sb_))
Q = iSt - iS # [feat_dim, feat_dim]
P = np.dot(np.dot(iSt, self.Sb_), iS) # [feat_dim, feat_dim]
U, s, V = svd(P, full_matrices=False)
self.Lambda_ = np.diag(s[:self.n_phi_]) # [n_phi, n_phi]
self.Uk_ = U[:, :self.n_phi_] # [feat_dim, n_phi]
self.Q_hat_ = np.dot(np.dot(self.Uk_.T, Q), self.Uk_) # [n_phi, n_phi]
def fit_maximum_likelihood(self, X, y):
# ====== preprocessing ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
elif "odin.fuel" in str(type(X)):
X = X[:]
if isinstance(y, (tuple, list)):
y = np.asarray(y)
# ====== normalizing and initializing ====== #
X = self.normalizer.fit(X, y).transform(X)
classes = np.unique(y)
self.initialize(X, labels=classes)
# ====== ml ====== #
Sw = compute_within_cov(X, y, classes)
self.St_ = np.cov(X.T)
self.Sb_ = self.St_ - Sw
# ====== the default class_avg ====== #
self._update_caches()
model_vecs = compute_class_avg(X, y, classes=classes)
self.X_model_ = np.dot(model_vecs, self.Uk_)
return self
def fit(self, X, y):
"""
Parameters
----------
X : [num_samples, feat_dim]
y : [num_samples]
"""
# ====== preprocessing ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
elif "odin.fuel" in str(type(X)):
X = X[:]
if isinstance(y, (tuple, list)):
y = np.asarray(y)
assert X.shape[0] == y.shape[0], \
"Number of samples mismatch in `X` and `y`, %d != %d" % \
(X.shape[0], y.shape[0])
# ====== normalize and initialize ====== #
y_counts = np.bincount(y) # sessions per speaker
classes = np.unique(y)
X = self.normalizer.fit(X, y).transform(X)
self.initialize(X, labels=classes)
# ====== Initializing ====== #
F = np.zeros((self.num_classes, self.feat_dim))
for clz in np.unique(y):
# Speaker indices
F[clz, :] = X[y == clz, :].sum(axis=0)
if self.verbose_ > 0:
print('Re-estimating the Eigenvoice subspace with {} factors ...'.format(self.n_phi_))
X_sqr = np.dot(X.T, X)
# ====== iteration ====== #
iter = 0
last_llk_value = None
while True:
e_time = time.time()
# expectation
Ey, Eyy = self.expectation_plda(F, y_counts)
e_time = time.time() - e_time
# maximization
m_time = time.time()
self.maximization_plda(X, X_sqr, F, Ey, Eyy)
m_time = time.time() - m_time
# log-likelihood
llk = 'None'
llk_value = None
if self.verbose_ > 1 or isinstance(self.n_iter_, string_types):
llk_value = self.compute_llk(X)
llk = '%.2f' % llk_value
if self.verbose_ > 0:
print('#iter:%-3d \t [llk = %s] \t [E-step = %.2f s] [M-step = %.2f s]' %
(iter + 1, llk, e_time, m_time))
# check breaking condition
iter += 1
if isinstance(self.n_iter_, Number):
if iter >= self.n_iter_:
break
elif iter > 2 and last_llk_value is not None:
if llk_value - last_llk_value < self.improve_threshold_:
break
last_llk_value = llk_value
# ====== Update the eigenvoice space ====== #
self.Sb_ = self.Phi_.dot(self.Phi_.T)
self.St_ = self.Sb_ + self.Sigma_
# ====== the default class_avg ====== #
self._update_caches()
model_vecs = compute_class_avg(X, y, classes=classes)
    self.X_model_ = np.dot(model_vecs, self.Uk_)
    return self
def expectation_plda(self, F, cls_counts):
"""
Parameters
----------
F : [num_classes, feat_dim]
cls_count : [num_classes]
"""
# computes the posterior mean and covariance of the factors
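    # For a class with n sessions and summed stats f, the posterior is
    #   Cov(y) = (I + n * Phi^T Sigma^-1 Phi)^-1
    #   E[y]   = Cov(y) * Phi^T Sigma^-1 * f
    # The loops below evaluate these, caching Cov(y) for each unique n.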
num_classes = F.shape[0]
Eyy = np.zeros(shape=(self.n_phi_, self.n_phi_))
Ey_clz = np.zeros(shape=(num_classes, self.n_phi_))
# initialize common terms to save computations
uniqFreqs = unique(cls_counts, keep_order=True)
n_uniq = len(uniqFreqs)
invTerms = np.empty(shape=(n_uniq, self.n_phi_, self.n_phi_))
PhiT_invS = solve(self.Sigma_.T, self.Phi_).T # [n_phi, feat_dim]
PhiT_invS_Phi = np.dot(PhiT_invS, self.Phi_) # [n_phi, n_phi]
I = np.eye(self.n_phi_)
for ix in range(n_uniq):
nPhiT_invS_Phi = uniqFreqs[ix] * PhiT_invS_Phi
invTerms[ix] = inv(I + nPhiT_invS_Phi)
for clz in range(num_classes):
num_samples = cls_counts[clz]
PhiT_invS_y = np.dot(PhiT_invS, F[clz, :])
idx = np.flatnonzero(uniqFreqs == num_samples)[0]
Cyy = invTerms[idx]
Ey_clz[clz, :] = np.dot(Cyy, PhiT_invS_y)
Eyy += num_samples * Cyy
Eyy += np.dot((Ey_clz * cls_counts[:, None]).T, Ey_clz)
return Ey_clz, Eyy
def compute_llk(self, X):
"""
Parameters
----------
X : [num_samples, feat_dim]
"""
num_samples = X.shape[0]
S = np.dot(self.Phi_, self.Phi_.T) + self.Sigma_ # [feat_dim, feat_dim]
llk = -0.5 * (self.feat_dim * num_samples * np.log(2 * np.pi) +
num_samples * logdet(S) +
np.sum(X * solve(S, X.T).T))
return llk
def maximization_plda(self, X, X_sqr, F, Ey, Eyy):
"""
    ML re-estimation of the Eigenvoice subspace and the covariance of the
    residual noise (full).
    Parameters
    ----------
    X : [num_samples, feat_dim]
    X_sqr : [feat_dim, feat_dim]
F : [num_classes, feat_dim]
Ey : [num_classes, n_phi]
Eyy : [n_phi, n_phi]
"""
num_samples = X.shape[0]
Ey_FT = np.dot(Ey.T, F) # [n_phi, feat_dim]
self.Phi_ = solve(Eyy.T, Ey_FT).T # [feat_dim, n_phi]
self.Sigma_ = 1. / num_samples * (X_sqr - np.dot(self.Phi_, Ey_FT))
def transform(self, X):
if not self.is_fitted:
raise RuntimeError("This model hasn't been fitted!")
# ====== check X ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
elif "odin.fuel" in str(type(X)):
X = X[:]
# ====== transform into latent space ====== #
X_norm = self.normalizer.transform(X)
X_project = np.dot(X_norm, self.Uk_) # [num_samples, n_phi]
return X_project
# return np.dot(X_project, self.Q_hat_)
# h = np.dot(X_project, self.Q_hat_) * X_project
# return h
def predict_log_proba(self, X, X_model=None):
"""
Parameters
----------
X : [num_samples, feat_dim]
X_model : [num_classes, feat_dim]
if None, use class average extracted based on fitted data
Return
------
log-probabilities matrix [num_samples, num_classes]
"""
if not self.is_fitted:
raise RuntimeError("This model hasn't been fitted!")
# ====== check X_model ====== #
if X_model is None:
X_model = self.X_model_
else:
# [num_classes, n_phi]
X_model = np.dot(self.normalizer.transform(X_model), self.Uk_)
if X_model.shape[0] != self.num_classes:
warnings.warn("The model matrix contains %d classes, but the "
"fitted number of classes is %d" %
(X_model.shape[0], self.num_classes))
# ====== check X ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
elif "odin.fuel" in str(type(X)):
X = X[:]
# ====== transform the input matrices ====== #
X = np.dot(self.normalizer.transform(X), self.Uk_) # [num_samples, n_phi]
# [num_classes, 1]
score_h1 = np.sum(np.dot(X_model, self.Q_hat_) * X_model, axis=1, keepdims=True)
# [num_samples, 1]
score_h2 = np.sum(np.dot(X, self.Q_hat_) * X, axis=1, keepdims=True)
# [num_samples, num_classes]
score_h1h2 = 2 * np.dot(X, np.dot(X_model, self.Lambda_).T)
# [num_samples, num_classes]
scores = score_h1h2 + score_h1.T + score_h2
return scores
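# Minimal usage sketch of the PLDA workflow (fit -> transform -> scoring).
# The feature dimension, number of classes and `n_phi` below are arbitrary
# values chosen only for illustration, not values recommended by this module.
if __name__ == "__main__":
  rng = np.random.RandomState(0)
  X_demo = rng.randn(200, 40)              # 200 vectors with 40 features
  y_demo = rng.randint(0, 10, size=200)    # 10 hypothetical integer class labels
  plda = PLDA(n_phi=20, n_iter=5, random_state=0)
  plda.fit(X_demo, y_demo)
  latent = plda.transform(X_demo)          # [200, 20] latent projections
  scores = plda.predict_log_proba(X_demo)  # [200, 10] verification scores
  print(latent.shape, scores.shape)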
| mit |
mugizico/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
SMTorg/smt | smt/sampling_methods/tests/test_sampling_method_examples.py | 3 | 1403 | import unittest
import matplotlib
matplotlib.use("Agg")
class Test(unittest.TestCase):
def test_random(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import Random
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = Random(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_lhs(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import LHS
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = LHS(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_full_factorial(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import FullFactorial
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = FullFactorial(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
shortlab/hognose | plotting/update.py | 1 | 3921 | # -*- coding: utf-8 -*-
"""
"""
import sys, os, getopt, traceback  # Operating system / utility modules
import numpy as np                 # Numerical python
import matplotlib.pyplot as plt    # Plotting
import pylab as py
import scipy.optimize # needed for trendline calcs
#print str(sys.argv)
filename=str(sys.argv[1])
#filename='d1.csv'
print str(sys.argv)
def main(filename):
# If there are 6 columns, use range(0,7)
# First I need to read in from a csv:
num_cols= int(sys.argv[2]) #2 #13
data = np.loadtxt(filename,delimiter=',',usecols=range(0,num_cols+1),skiprows=1)
#data = np.loadtxt(filename,delimiter=';',dtype='float')
#
# Assign imported data to individual arrays:
# NOTE: be careful about the indices used, since adding or using
# different postprocessors can change the column order and number.
time=data[:,0]
time=time/86400 # convert time from seconds to days
#BulkSwitch=data[:,1]
#total_metal=data[:,2]
#LTL=data[:,2]
Oxide_thickness=data[:,num_cols]
#total_solute=data[:,4]
#PP_dt=data[:,5]
#Transition_location=data[:,6]
#avg_thermal_cond=data[:,7]
#current_time=data[:,8]
#dt_ratio=data[:,9]
#max_O_begin=data[:,10]
#min_O=data[:,11]
#oxide_front_position=data[:,12]
#ratio=data[:,13]
data_size=np.size(time)
#print Oxide_thickness(5)
####################################################################
####################################################################
# Find the transition index:
transition_index=5
transition_time=5
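    # NOTE: with all of the conditions below commented out, the assignment runs on
    # every iteration, so the transition point ends up at the last time step.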
for i in range(data_size):
#if BulkSwitchPP[i]>0.5 and Oxide_thickness[i]<2.5:
#if time[i]<377:
#if time[i]>376:
#if ((Oxide_thickness[i]>1.97) and (Oxide_thickness[i]<2)).any():
transition_index=i
transition_time=time[i]
print 'Transition index = ',transition_index
print 'Transition time = ',transition_time,' days'
print 'Transition thickness = ',Oxide_thickness[transition_index],' microns'
# Trying to find a data fit:
def f2(t,a,b):
return a*np.power(t,b)
guess=[0.25,0.35]
params=scipy.optimize.curve_fit(f2,time[0:transition_index],
Oxide_thickness[0:transition_index],guess)
results=params[0]
fit_coeff=results[0]
fit_exponent=results[1]
print "fit_coeff"
print str(round(fit_coeff,3))
print "fit_exponent"
print str(round(fit_exponent,3))
# Create arrays to hold the trendline fits:
time_fit=np.linspace(0,transition_time,100)
thickness_fit=np.linspace(0,transition_time,100)
# Calculate the trendline (fit) thicknesses using the values obtained
# through scipy.optimize.curve_fit above:
for n in range(100):
thickness_fit[n]=fit_coeff*np.power(time_fit[n],fit_exponent)
fit=str(round(fit_coeff,3))+'*t^'+str(round(fit_exponent,3))
fit_plot=10 #sys.argv[2]
    if fit_plot:
plt.ion()
plt.show()
fig = plt.figure(figsize = (13,8))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=18)
axe.plot(time,Oxide_thickness,'-o',color='red',label='HOGNOSE results '+fit)
axe.plot(time_fit,thickness_fit,'-x',color='blue',label='Trendline')
#plt.annotate(fit,xy=(time_fit[30],thickness_fit[30]),
# xytext=(time_fit[30]+1,thickness_fit[30]-.15),
# arrowprops=dict(arrowstyle="->"))
axe.legend(loc = 4) #'best')
axe.legend(loc=4,ncol=2)
#plt.xlim(0,1.8)
#plt.ylim(0,1.2)
plt.ylabel('Oxide Thickness (microns)',fontsize=20)
plt.xlabel('Time (days)',fontsize=20)
plt.title('HOGNOSE vs Literature Data Comparison')
plt.savefig('plot.png')
main(filename)
| lgpl-2.1 |
theoryno3/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/indexing/common.py | 2 | 8523 | """ common utilities """
import itertools
from warnings import catch_warnings, filterwarnings
import numpy as np
from pandas.core.dtypes.common import is_scalar
from pandas import DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
_verbose = False
def _mklbl(prefix, n):
return ["{prefix}{i}".format(prefix=prefix, i=i) for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
""" indexing comprehensive base class """
_objs = {"series", "frame"}
_typs = {
"ints",
"uints",
"labels",
"mixed",
"ts",
"floats",
"empty",
"ts_rev",
"multi",
}
def setup_method(self, method):
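        """Build the Series/DataFrame fixtures (int, uint, float, label, mixed,
        datetime, reversed-datetime, multi-indexed and empty) used by the tests."""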
self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
self.frame_ints = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
self.series_uints = Series(
np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
)
self.frame_uints = DataFrame(
np.random.randn(4, 4),
index=UInt64Index(range(0, 8, 2)),
columns=UInt64Index(range(0, 12, 3)),
)
self.series_floats = Series(
np.random.rand(4), index=Float64Index(range(0, 8, 2))
)
self.frame_floats = DataFrame(
np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)),
)
m_idces = [
MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
MultiIndex.from_product([[9, 10], [11, 12]]),
]
self.series_multi = Series(np.random.rand(4), index=m_idces[0])
self.frame_multi = DataFrame(
np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
)
self.series_labels = Series(np.random.randn(4), index=list("abcd"))
self.frame_labels = DataFrame(
np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
)
self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])
self.series_ts = Series(
np.random.randn(4), index=date_range("20130101", periods=4)
)
self.frame_ts = DataFrame(
np.random.randn(4, 4), index=date_range("20130101", periods=4)
)
dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
self.frame_empty = DataFrame()
self.series_empty = Series()
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, "{o}_{t}".format(o=o, t=t), None)
setattr(self, o, d)
def generate_indices(self, f, values=False):
""" generate the indices
        if values is True, use the axis values;
        if False, use the range
"""
axes = f.axes
if values:
axes = (list(range(len(a))) for a in axes)
return itertools.product(*axes)
def get_result(self, obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == "indexer":
method = "ix"
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
with catch_warnings(record=True):
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except AttributeError:
xp = getattr(obj, method).__getitem__(key)
return xp
def get_value(self, f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
return f.ix[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(
self,
name,
method1,
key1,
method2,
key2,
typs=None,
objs=None,
axes=None,
fails=None,
):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = (
"%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s"
% (name, result, t, o, method1, method2, a, error or "")
)
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = self.get_result(obj, method2, k2, a)
except Exception:
result = "no comp"
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
assert rs == xp
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
result = "ok"
except AssertionError as e:
detail = str(e)
result = "fail"
# reverse the checks
if fails is True:
if result == "fail":
result = "ok (fail)"
_print(result)
if not result.startswith("ok"):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = "ok ({0.__name__})".format(type(detail))
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is None:
continue
def _call(obj=obj):
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
_call()
| bsd-3-clause |
adamginsburg/APEX_CMZ_H2CO | analysis/constrain_parameters.py | 1 | 21158 | """
Functions for fitting temperature (and density and column) from the line ratio
plus whatever other constraints are available
"""
import inspect
import time
import collections
import warnings
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy import stats
from astropy import units as u
from astropy import log
import pylab as pl
import matplotlib
from h2co_modeling import grid_fitter
from paraH2COmodel import generic_paraH2COmodel
short_mapping = {'dens': 'density',
'col': 'column',
'tem': 'temperature'}
chi2_mapping = {'X': 'Abundance',
'ff1': "Filling Factor $3_{0,3}-2_{0,2}$",
'ff2': "Filling Factor $3_{2,1}-2_{2,0}$",
'r321303': "Ratio $3_{0,3}-2_{0,2}$ / $3_{2,1}-2_{2,0}$",
'dens': "Density $n(H_2)$ cm$^{-3}$",
'h2': "Column $N(H_2)$ cm$^{-2}$",
}
class paraH2COmodel(generic_paraH2COmodel):
def __init__(self, tbackground=2.73, gridsize=[250.,101.,100.]):
t0 = time.time()
from pyspeckit_fitting import (texgrid303, taugrid303, texgrid321, taugrid321,
texgrid322, taugrid322, hdr)
# The grid was computed with a linewidth (or gradient) 5 km/s/pc
self.grid_linewidth = 5.0
t1 = time.time()
log.debug("Loading grids took {0:0.1f} seconds".format(t1-t0))
self.texgrid303 = texgrid303
self.taugrid303 = taugrid303
self.texgrid321 = texgrid321
self.taugrid321 = taugrid321
self.texgrid322 = texgrid322
self.taugrid322 = taugrid322
self.hdr = hdr
self.Tbackground = tbackground
self.tline303a = ((1.0-np.exp(-np.array(self.taugrid303))) *
(self.texgrid303-self.Tbackground))
self.tline321a = ((1.0-np.exp(-np.array(self.taugrid321))) *
(self.texgrid321-self.Tbackground))
self.tline322a = ((1.0-np.exp(-np.array(self.taugrid322))) *
(self.texgrid322-self.Tbackground))
zinds,yinds,xinds = np.indices(self.tline303a.shape)
upsample_factor = np.array([gridsize[0]/self.tline303a.shape[0], # temperature
gridsize[1]/self.tline303a.shape[1], # density
gridsize[2]/self.tline303a.shape[2]], # column
dtype='float')
uzinds,uyinds,uxinds = upsinds = np.indices([x*us
for x,us in zip(self.tline303a.shape,
upsample_factor)],
dtype='float')
self.tline303 = map_coordinates(self.tline303a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline321 = map_coordinates(self.tline321a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline322 = map_coordinates(self.tline322a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline = {303: self.tline303,
321: self.tline321,
322: self.tline322}
assert self.hdr['CTYPE2'].strip() == 'LOG-DENS'
assert self.hdr['CTYPE1'].strip() == 'LOG-COLU'
self.columnarr = ((uxinds + self.hdr['CRPIX1']-1)*self.hdr['CDELT1'] /
float(upsample_factor[2])+self.hdr['CRVAL1']) # log column
self.densityarr = ((uyinds + self.hdr['CRPIX2']-1)*self.hdr['CDELT2'] /
float(upsample_factor[1])+self.hdr['CRVAL2']) # log density
self.temparr = ((uzinds + self.hdr['CRPIX3']-1)*self.hdr['CDELT3'] /
float(upsample_factor[0])+self.hdr['CRVAL3']) # lin temperature
self.drange = [self.densityarr.min(), self.densityarr.max()]
self.crange = [self.columnarr.min(), self.columnarr.max()]
self.trange = [self.temparr.min(), self.temparr.max()]
self.darr = self.densityarr[0,:,0]
self.carr = self.columnarr[0,0,:]
self.tarr = self.temparr[:,0,0]
self.axes = {'dens': self.darr,
'col': self.carr,
'tem': self.tarr}
self.labels = {'dens': 'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]',
'col': 'p-H$_2$CO\n[log cm$^{-2}$/(km s$^{-1}$ pc)]',
'tem': 'Temperature [K]'}
# While the individual lines are subject to filling factor uncertainties, the
# ratio is not.
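        # (schematically T_line = ff * (1 - exp(-tau)) * (Tex - Tbg), so the
        #  unknown beam filling factor ff cancels in the 321/303 ratio,
        #  assuming both lines share the same ff.)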
self.modelratio1 = self.tline321/self.tline303
self.modelratio2 = self.tline322/self.tline321
self.model_logabundance = np.log10(10**self.columnarr / u.pc.to(u.cm) /
10**self.densityarr)
t2 = time.time()
log.debug("Grid initialization took {0:0.1f} seconds total,"
" {1:0.1f} since loading grids.".format(t2-t0,t2-t1))
    @staticmethod
    def list_parameters():
        return ['taline303', 'etaline303', 'taline321', 'etaline321',
                'taline322', 'etaline322', 'logabundance', 'elogabundance',
                'logh2column', 'elogh2column', 'ratio321303', 'eratio321303',
                'ratio321322', 'eratio321322', 'linewidth']
def set_constraints_fromrow(self, row, **kwargs):
        mapping = {'e321': 'etaline321',
                   'e303': 'etaline303',
                   'Smean321': 'taline321',
                   'Smean303': 'taline303',
                   'r321303': 'ratio321303',
                   'ratio321303': 'ratio321303',
                   'er321303': 'eratio321303',
                   'eratio321303': 'eratio321303',
                   'logabundance': 'logabundance',
                   'elogabundance': 'elogabundance',
                   'logh2column': 'logh2column',
                   'elogh2column': 'elogh2column',
                   'dustmindens': 'linmindens',
                   'v_rms': 'linewidth',
                   }
pars = {mapping[k]: row[k] for k in row.colnames if k in mapping}
pars.update(**kwargs)
self.set_constraints(**pars)
def set_constraints(self,
taline303=None, etaline303=None,
taline321=None, etaline321=None,
taline322=None, etaline322=None,
logabundance=None, elogabundance=None,
logh2column=None, elogh2column=None,
ratio321303=None, eratio321303=None,
ratio321322=None, eratio321322=None,
linmindens=None,
mindens=None, emindens=0.2,
linewidth=None):
argspec=inspect.getargvalues(inspect.currentframe())
for arg in argspec.args:
if argspec.locals[arg] is not None:
setattr(self, arg, argspec.locals[arg])
self.chi2_X = (self.chi2_abundance(logabundance, elogabundance)
if not any(arg is None for arg in (logabundance,
elogabundance))
else 0)
self.chi2_h2 = (self.chi2_column(logh2column, elogh2column,
logabundance, linewidth)
if not
any(arg is None for arg in (logabundance, logh2column,
elogh2column, linewidth))
else 0)
self.chi2_ff1 = (self.chi2_fillingfactor(taline303, etaline303, 303)
if not any(arg is None for arg in (taline303,
etaline303))
else 0)
self.chi2_ff2 = (self.chi2_fillingfactor(taline321, etaline321, 321)
if not any(arg is None for arg in (taline321,
etaline321))
else 0)
self.chi2_r321303 = (self.grid_getmatch_321to303(ratio321303,
eratio321303)
if not any(arg is None for arg in (ratio321303,
eratio321303))
else 0)
if np.all(~np.isfinite(self.chi2_r321303)):
self.chi2_r321303 = 0
self.chi2_r321322 = (self.grid_getmatch_321to303(ratio321322,
eratio321322)
if not any(arg is None for arg in (ratio321322,
eratio321322))
else 0)
if np.all(~np.isfinite(self.chi2_r321322)):
self.chi2_r321322 = 0
if linmindens is not None:
if mindens is not None:
raise ValueError("Both linmindens and logmindens were set.")
mindens = np.log10(linmindens)
if mindens is not None:
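            # one-sided penalty: only grid cells below the minimum density
            # (in log10 cm^-3) are penalized; denser cells contribute zero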
self.chi2_dens = (((self.densityarr - mindens)/emindens)**2
* (self.densityarr < (mindens)))
else:
self.chi2_dens = 0
self.compute_chi2_fromcomponents()
def compute_chi2_fromcomponents(self):
"""
Compute the total chi2 from the individual chi2 components
"""
self._parconstraints = None # not determined until get_parconstraints run
self.chi2 = (self.chi2_X + self.chi2_h2 + self.chi2_ff1 + self.chi2_ff2
+ self.chi2_r321322 + self.chi2_r321303 + self.chi2_dens)
def parplot(self, par1='col', par2='dens', nlevs=5, levels=None,
colors=[(0.5,0,0), (0.75,0,0), (1.0,0,0), (1.0,0.25,0), (0.75,0.5,0)],
colorsf=[0.0, 0.33, 0.66, 1.0, 'w']):
cdict = {x: [(0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)]
for x in ('red','green','blue')}
cdict['blue'] = [(0.0, 1., 1.), (1.0, 1.0, 1.0)]
cm = matplotlib.colors.LinearSegmentedColormap('mycm', cdict)
colorsf = [cm(float(ii)) if isinstance(ii, (float,int))
else ii
for ii in colorsf]
xax = self.axes[par1]
yax = self.axes[par2]
xlabel = self.labels[par1]
ylabel = self.labels[par2]
amapping = {('col','dens'): 0,
('dens','tem'): 2,
('col','tem'): 1}
if (par1,par2) in amapping:
axis = amapping[(par1,par2)]
swaps = (0,0)
elif (par2,par1) in amapping:
axis = amapping[(par2,par1)]
swaps = (0,1)
if levels is None:
levels = ([0]+[(stats.norm.cdf(ii)-stats.norm.cdf(-ii))
for ii in range(1,nlevs)]+[1])
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par1])]
ymaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par2])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par1])]
yexpect = self.parconstraints['expected_{0}'.format(short_mapping[par2])]
fig = pl.gcf()
fig.clf()
ax1 = pl.subplot(2,2,1)
if 'chi2_r321303' in self.individual_likelihoods:
like = (self.individual_likelihoods['chi2_r321303'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
        if hasattr(self.chi2_r321322, 'size'):
like = cdf_of_like(self.individual_likelihoods['chi2_r321322'])
pl.contour(xax, yax, like.sum(axis=axis).swapaxes(*swaps),
levels=levels,
cmap=pl.cm.bone)
pl.title("Ratio $3_{0,3}-2_{0,2}/3_{2,1}-2_{2,0}$")
ax4 = pl.subplot(2,2,2)
if hasattr(self.chi2_X, 'size'):
like = self.individual_likelihoods['chi2_X']
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("log(p-H$_2$CO/H$_2$) "
"$= {0:0.1f}\pm{1:0.1f}$".format(self.logabundance,
self.elogabundance))
ax3 = pl.subplot(2,2,3)
if hasattr(self.chi2_h2, 'size'):
like = (self.individual_likelihoods['chi2_h2'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("Total log$(N(\\mathrm{{H}}_2))$ ")
# "= {0:0.1f}\pm{1:0.1f}$".format(self.logh2column,
# self.elogh2column))
ax5 = pl.subplot(2,2,4)
if hasattr(self.chi2_ff1, 'size'):
cdict = {x: [(0.0, 0.5, 0.5),
(1.0, 0.0, 0.0)]
for x in ('red','green','blue')}
cdict['green'] = [(0, 0.5, 0.5), (1,1,1)]
cdict['red'] = [(0, 0.5, 0.5), (1,0.7,0.7)]
cdict['blue'] = [(0, 0.0, 0.0), (1,0,0)]
#cdict['alpha'] = [(0.0, 0.0, 0.0), (1.0, 0.3, 0.3)]
darker = matplotlib.colors.LinearSegmentedColormap('darker', cdict)
like = (self.individual_likelihoods['chi2_ff1'])
plim = cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps)
pl.contour(xax, yax, plim, levels=levels,
cmap=darker, zorder=5)
if hasattr(self.chi2_dens, 'size'):
like = (self.individual_likelihoods['chi2_dens'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
#if hasattr(self, 'taline303'):
# ff1_mask = (self.tline303 < 10*self.taline303)
# pl.contour(xax, yax, ff1_mask.max(axis=axis).swapaxes(*swaps),
# levels=[0.5], colors='k')
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
#pl.contour(xax, yax, (tline303 < 100*par1).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k')
#pl.contour(xax, yax, (tline321 < 10*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.contour(xax, yax, (tline321 < 100*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.title("Line Brightness + $ff\leq1$")
pl.title("Minimum Density & $ff$")
fig.text(0.05, 0.5, ylabel, horizontalalignment='center',
verticalalignment='center',
rotation='vertical', transform=fig.transFigure)
fig.text(0.5, 0.02, xlabel, horizontalalignment='center', transform=fig.transFigure)
if par1 == 'col':
for ss in range(1,5):
ax = pl.subplot(2,2,ss)
ax.xaxis.set_ticks(np.arange(self.carr.min(), self.carr.max()))
pl.subplots_adjust(wspace=0.25, hspace=0.45)
def parplot1d(self, par='col', levels=None, clf=True,
legend=True, legendfontsize=14):
xax = self.axes[par]
xlabel = self.labels[par]
amapping = {'col':(2,(0,1)),
'dens':(1,(0,2)),
'tem':(0,(1,2))}
axis,axes = amapping[par]
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par])]
like = self.likelihood.sum(axis=axes)
like /= like.sum()
inds_cdf = np.argsort(like)
cdf = like[inds_cdf]
fig = pl.gcf()
if clf:
fig.clf()
ax = fig.gca()
ax.plot(xax, like, 'k-', label='Posterior')
for key in self.individual_likelihoods:
if key in ('chi2','_chi2'):
continue # already done
ilike = self.individual_likelihoods[key].sum(axis=axes)
ilike /= ilike.sum()
ax.plot(xax, ilike, label=chi2_mapping[key.replace("chi2_","")])
ax.vlines((xmaxlike,), 0, like.max(), linestyle='--', color='r',
label='Maximum Likelihood')
ax.vlines((xexpect,), 0, like.max(), linestyle='--', color='b',
label='E[{0}]'.format(xlabel))
xexpect_v2 = (like*xax).sum()/like.sum()
ax.vlines((xexpect_v2,), 0, like.max(), linestyle='--', color='c',
zorder=-1)
print("par:{4} xmaxlike: {0}, xexpect: {1}, xexpect_v2: {2},"
"maxlike: {3}, diff:{5}"
.format(xmaxlike, xexpect, xexpect_v2, like.max(), par,
xexpect-xmaxlike))
if levels is not None:
if not isinstance(levels, collections.Iterable):
levels = [levels]
cdf_inds = np.argsort(like)
ppf = 1-like[cdf_inds].cumsum()
cutoff_likes = [like[cdf_inds[np.argmin(np.abs(ppf-lev))]]
for lev in levels]
for fillind,cutoff in enumerate(sorted(cutoff_likes)):
selection = like > cutoff
ax.fill_between(xax[selection], like[selection]*0,
like[selection], alpha=0.1, zorder=fillind-20)
if np.abs(like[selection].sum() - levels[0]) > 0.05:
# we want the sum of the likelihood to be right!
#import ipdb; ipdb.set_trace()
warnings.warn("Likelihood is not self-consistent.")
if legend:
ax.legend(loc='best', fontsize=legendfontsize)
ax.set_xlabel(xlabel)
ax.set_ylabel('$P(${0}$)$'.format(xlabel))
def parplot1d_all(self, legendfontsize=14, **kwargs):
fig = pl.gcf()
if not all(fig.get_size_inches() == [12,16]):
num = fig.number
pl.close(fig)
fig = pl.figure(num, figsize=(12,16))
for axindex,par in enumerate(('col','dens','tem')):
ax = fig.add_subplot(3,1,axindex+1)
self.parplot1d(par=par, clf=False, legend=False, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if axindex == 1:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fontsize=legendfontsize)
pl.subplots_adjust(hspace=0.45)
@property
def individual_likelihoods(self):
if hasattr(self, '_likelihoods') and self._likelihoods is not None:
return self._likelihoods
else:
self._likelihoods = {}
for key in self.__dict__:
                if 'chi2' in key and hasattr(getattr(self, key), 'size'):
self._likelihoods[key] = np.exp(-getattr(self,key)/2.)
self._likelihoods[key] /= self._likelihoods[key].sum()
return self._likelihoods
def cdf_of_like(like):
"""
There is probably an easier way to do this, BUT it works:
Turn a likelihood image into a CDF image
"""
like = like/like.sum()
order = np.argsort(like.flat)[::-1]
cdf = like.flat[order].cumsum()[np.argsort(order)].reshape(like.shape)
cdf[like == like.max()] = 0
return cdf
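# Hand-checked example (hypothetical 2x2 likelihood, values chosen for clarity):
#
#     like = np.array([[0.4, 0.3],
#                      [0.2, 0.1]])
#     cdf_of_like(like)
#     # -> array([[0. , 0.7],
#     #           [0.9, 1. ]])
#     # i.e. each pixel holds the summed probability of all pixels at least as
#     # likely as itself (with the peak forced to 0), so contouring the result
#     # at e.g. 0.68 encloses the 68% credible region.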
def ppf_of_like(like):
return 1-cdf_of_like(like)
| bsd-3-clause |
ScreamingUdder/mantid | scripts/DGSPlanner/InstrumentSetupWidget.py | 2 | 15796 | #pylint: disable=invalid-name,no-name-in-module,too-many-instance-attributes,too-many-public-methods
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui, QtCore
import sys
import mantid
import numpy
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4']='PyQt4'
#the following matplotlib imports cannot be placed before the use command, so we ignore flake8 warnings
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas # noqa
from matplotlib.figure import Figure # noqa
from mpl_toolkits.mplot3d import Axes3D # noqa
import matplotlib.pyplot # noqa
try:
from PyQt4.QtCore import QString
except ImportError:
QString = type("")
class GonioTableModel(QtCore.QAbstractTableModel):
"""
Dealing with the goniometer input
"""
changed=QtCore.pyqtSignal(dict) #each value is a list
def __init__(self, axes, parent = None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.labels = axes['gonioLabels']
self.dirstrings = axes['gonioDirs']
self.senses = axes['gonioSenses']
self.minvalues = axes['gonioMinvals']
self.maxvalues = axes['gonioMaxvals']
self.steps = axes['gonioSteps']
self.gonioColumns=['Name','Direction','Sense (+/-1)','Minim(deg)','Maxim(deg)','Step(deg)']
self.gonioRows=['Axis0','Axis1','Axis2']
def rowCount(self, dummy_parent):
return 3
def columnCount(self, dummy_parent):
return 6
def flags(self, dummy_index):
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def headerData(self, section, Qt_Orientation, role=None):
if role == QtCore.Qt.DisplayRole and Qt_Orientation == QtCore.Qt.Horizontal:
return self.gonioColumns[section]
if role == QtCore.Qt.DisplayRole and Qt_Orientation == QtCore.Qt.Vertical:
return self.gonioRows[section]
def data(self, index, role):
#pylint: disable=too-many-branches
row = index.row()
column = index.column()
if role == QtCore.Qt.EditRole or role == QtCore.Qt.DisplayRole:
if column==0:
value=QString(self.labels[row])
elif column==1:
value=QString(self.dirstrings[row])
elif column==2:
value=QString(str(self.senses[row]))
elif column==3:
value=QString(str(self.minvalues[row]))
elif column==4:
value=QString(str(self.maxvalues[row]))
elif column==5:
value=QString(str(self.steps[row]))
return value
elif role == QtCore.Qt.BackgroundRole:
brush=QtGui.QBrush(QtCore.Qt.white)
if column==0 and len(self.labels[row])>0 and self.labels.count(self.labels[row])==1:
pass
elif column==1 and self.validDir(self.dirstrings[row]):
pass
elif column==2 and (self.senses[row]==1 or self.senses[row]==-1):
pass
elif (column==3 or column==4) and self.minvalues[row]<=self.maxvalues[row]:
pass
elif column==5 and self.steps[row]>0.1:
pass
else:
brush=QtGui.QBrush(QtCore.Qt.red)
return brush
def setData(self, index, value, role = QtCore.Qt.EditRole):
#pylint: disable=too-many-branches
if role == QtCore.Qt.EditRole:
row = index.row()
column = index.column()
if column<=1:
try:
val=str(value.toString()) #QVariant
except AttributeError:
val=str(value) #string
if column==0:
self.labels[row]=val
else:
self.dirstrings[row]=val
elif column==2:
try:
val=value.toInt()[0] #QVariant
except AttributeError:
val=int(value) #string
self.senses[row]=val
else:
try:
val=value.toFloat()[0] #QVariant
except AttributeError:
val=float(value) #string
if column==3:
self.minvalues[row]=val
elif column==4:
self.maxvalues[row]=val
else:
self.steps[row]=val
self.dataChanged.emit(index, index)
if self.validateGon():
values={'gonioLabels':self.labels,'gonioDirs':self.dirstrings,'gonioSenses':self.senses,
'gonioMinvals':self.minvalues,'gonioMaxvals':self.maxvalues,'gonioSteps':self.steps}
self.changed.emit(values)
return True
return False
def validDir(self,dirstring):
d=numpy.fromstring(dirstring,dtype=float,sep=',')
if len(d)==3:
return numpy.alltrue(numpy.isfinite(d))
return False
def validateGon(self):
for i in range(3):
if len(self.labels[i])==0 or self.labels.count(self.labels[i])>1 or self.senses[i] not in [-1,1]:
return False
if not self.validDir(self.dirstrings[i]):
return False
if self.minvalues[i]>self.maxvalues[i] or self.steps[i]<=0:
return False
return True
class InstrumentSetupWidget(QtGui.QWidget):
#signal when things change and valid
changed=QtCore.pyqtSignal(dict)
def __init__(self,parent=None):
# pylint: disable=unused-argument,super-on-old-class
super(InstrumentSetupWidget,self).__init__()
metrics=QtGui.QFontMetrics(self.font())
self.signaldict=dict()
#instrument selector
self.instrumentList=['ARCS','CNCS','DNS','EXED','FOCUS','HET','HYSPEC','LET','MAPS','MARI','MERLIN','SEQUOIA']
self.combo = QtGui.QComboBox(self)
for inst in self.instrumentList:
self.combo.addItem(inst)
defaultInstrument=mantid.config.getInstrument().name()
if defaultInstrument in self.instrumentList:
self.instrument=defaultInstrument
self.combo.setCurrentIndex(self.instrumentList.index(defaultInstrument))
else:
self.instrument=self.instrumentList[0]
self.combo.setCurrentIndex(0)
self.signaldict['instrument']=self.instrument
self.labelInst=QtGui.QLabel('Instrument')
#S2 and Ei edits
self.S2=0.0
self.Ei=10.0
self.signaldict['S2']=self.S2
self.signaldict['Ei']=self.Ei
self.validatorS2=QtGui.QDoubleValidator(-90.,90.,5,self)
self.validatorEi=QtGui.QDoubleValidator(1.,10000.,5,self)
self.labelS2=QtGui.QLabel('S2')
self.labelEi=QtGui.QLabel('Incident Energy')
self.editS2=QtGui.QLineEdit()
self.editS2.setValidator(self.validatorS2)
self.editEi=QtGui.QLineEdit()
self.editEi.setValidator(self.validatorEi)
self.editS2.setText(QString(format(self.S2,'.2f')))
self.editEi.setText(QString(format(self.Ei,'.1f')))
self.editEi.setFixedWidth(metrics.width("8888.88"))
self.editS2.setFixedWidth(metrics.width("888.88"))
#fast checkbox
self.fast=QtGui.QCheckBox("Fast",self)
self.fast.toggle()
self.updateFast()
#masking
self.labelMask=QtGui.QLabel('Mask file')
self.editMask=QtGui.QLineEdit()
self.buttonMask=QtGui.QPushButton("LoadMask")
#goniometer settings
self.labelGon=QtGui.QLabel('Goniometer')
self.tableViewGon = QtGui.QTableView(self)
self.tableViewGon.setMinimumWidth(metrics.width("Minimum ")*8)
self.tableViewGon.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.tableViewGon.verticalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.goniometerNames=['psi','gl','gs']
self.goniometerDirections=['0,1,0','0,0,1','1,0,0']
self.goniometerRotationSense=[1,1,1]
self.goniometerMin=[0.,0.,0.]
self.goniometerMax=[0.,0.,0.]
self.goniometerStep=[1.,1.,1.]
values={'gonioLabels':self.goniometerNames,'gonioDirs':self.goniometerDirections,'gonioSenses':self.goniometerRotationSense,
'gonioMinvals':self.goniometerMin,'gonioMaxvals':self.goniometerMax,'gonioSteps':self.goniometerStep}
self.goniomodel = GonioTableModel(values,self)
self.tableViewGon.setModel(self.goniomodel)
self.tableViewGon.update()
self.signaldict.update(values)
#goniometer figure
self.figure=Figure(figsize=(2,3))
self.figure.patch.set_facecolor('white')
self.canvas=FigureCanvas(self.figure)
self.gonfig=None
self.updateFigure()
#layout
self.gridI = QtGui.QGridLayout()
self.gridI.addWidget(self.labelInst,0,0)
self.gridI.addWidget(self.combo,0,1)
self.gridI.addWidget(self.labelEi,0,2)
self.gridI.addWidget(self.editEi,0,3)
self.gridI.addWidget(self.labelS2,0,4)
self.gridI.addWidget(self.editS2,0,5)
self.gridI.addWidget(self.fast,0,6)
self.setLayout(QtGui.QHBoxLayout())
self.rightside=QtGui.QVBoxLayout()
self.maskLayout=QtGui.QHBoxLayout()
self.maskLayout.addWidget(self.labelMask)
self.maskLayout.addWidget(self.editMask)
self.maskLayout.addWidget(self.buttonMask)
self.layout().addLayout(self.rightside)
self.rightside.addLayout(self.gridI)
self.rightside.addLayout(self.maskLayout)
self.rightside.addWidget(self.labelGon)
self.rightside.addWidget(self.tableViewGon)
self.layout().addWidget(self.canvas)
#connections
self.editS2.textEdited.connect(self.checkValidInputs)
self.editMask.textEdited.connect(self.setMaskFile)
self.combo.activated[str].connect(self.instrumentSelected)
self.fast.stateChanged.connect(self.updateFast)
self.buttonMask.clicked.connect(self.loadMaskFromFile)
self.editEi.textEdited.connect(self.checkValidInputs)
#call instrumentSelected once
self.instrumentSelected(self.instrument)
#connect goniometer change with figure
self.goniomodel.changed.connect(self.updateFigure)
self.updateAll()
def updateFigure(self):
#plot directions
if self.gonfig is not None:
self.gonfig.clear()
self.gonfig = Axes3D(self.figure)
self.gonfig.hold(True)
self.gonfig.set_frame_on(False)
self.gonfig.set_xlim3d(-0.6,0.6)
self.gonfig.set_ylim3d(-0.6,0.6)
self.gonfig.set_zlim3d(-1,5)
self.gonfig.set_axis_off()
self.gonfig.plot([0,1],[-3,-3],[0,0],zdir='y',color='black')
self.gonfig.plot([0,0],[-3,-2],[0,0],zdir='y',color='black')
self.gonfig.plot([0,0],[-3,-3],[0,1],zdir='y',color='black')
self.gonfig.text(0,1,-2.5,'Z',zdir=None,color='black')
self.gonfig.text(1,0,-2.5,'X',zdir=None,color='black')
self.gonfig.plot([0,0],[-3,-3],[-2,-0.5],zdir='y',color='black',linewidth=3)
self.gonfig.text(0,-1,-2.5,'Beam',zdir=None,color='black')
matplotlib.pyplot.gca().set_aspect('equal', adjustable='datalim')
self.gonfig.view_init(10,45)
colors=['b','g','r']
for i in range(3):
circle=numpy.array([mantid.kernel.Quat(0,0,0.5*numpy.sin(t),0.5*numpy.cos(t)) for t in
numpy.arange(0,1.51*numpy.pi,0.1*numpy.pi)])
if self.goniometerRotationSense[i]==1:
circle=numpy.append(circle,mantid.kernel.Quat(0,0,-0.45,-0.05))
circle=numpy.append(circle,mantid.kernel.Quat(0,0,-0.55,-0.05))
circle=numpy.append(circle,mantid.kernel.Quat(0,0,-0.5,0))
else:
circle=numpy.insert(circle,0,mantid.kernel.Quat(0,0,0,0.5))
circle=numpy.insert(circle,1,mantid.kernel.Quat(0,0,0.05,0.45))
circle=numpy.insert(circle,2,mantid.kernel.Quat(0,0,0.05,0.55))
t=numpy.fromstring(self.goniometerDirections[i],dtype=float,sep=',')
vt=mantid.kernel.V3D(t[0],t[1],t[2])
vt*=(1./vt.norm())
direction=mantid.kernel.Quat(mantid.kernel.V3D(1,0,0),vt)
directionS=mantid.kernel.Quat(direction[0],-direction[1],-direction[2],-direction[3])
gonAxis=numpy.array([mantid.kernel.Quat(0,1,0,0),mantid.kernel.Quat(0,-1,0,0)])
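            # rotate the reference circle and axis (drawn about +x) onto the
            # goniometer direction via quaternion conjugation, q * p * q^-1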
newcircle=direction*circle*directionS
newgonAxis=direction*gonAxis*directionS
parray=numpy.array([(p[1],p[2]+2*i,p[3]) for p in newcircle])
self.gonfig.plot(parray[:,0],parray[:,1],parray[:,2],zdir='y',color=colors[i])
parray=numpy.array([(p[1],p[2]+2*i,p[3]) for p in newgonAxis])
self.gonfig.plot(parray[:,0],parray[:,1],parray[:,2],zdir='y',color=colors[i])
self.gonfig.plot([t[0],-t[0]],[t[1]+2*i,-t[1]+2*i],[t[2],-t[2]],zdir='y',color=colors[i])
self.gonfig.text(0,1,2*i,self.goniometerNames[i],zdir=None,color=colors[i])
#plot sample
self.gonfig.text(0,0,6.7,'Sample',zdir=None,color='black')
u=numpy.linspace(0,2*numpy.pi,50)
v=numpy.linspace(0,numpy.pi,50)
x = 0.3 * numpy.outer(numpy.cos(u), numpy.sin(v))
y = 0.3 * numpy.outer(numpy.sin(u), numpy.sin(v))
        z = 0.3 * numpy.outer(numpy.ones(numpy.size(u)), numpy.cos(v))
self.gonfig.plot_surface(x,y,z+6,color='black',rstride=4, cstride=4)
self.canvas.draw()
self.updateAll()
def instrumentSelected(self,text):
d=dict()
self.instrument=text
d['instrument']=str(self.instrument)
if self.instrument in ["HYSPEC", "EXED"]:
self.labelS2.show()
self.editS2.show()
else:
self.labelS2.hide()
self.editS2.hide()
self.updateAll(**d)
def updateFast(self,*dummy_args):
d=dict()
d['makeFast']=self.fast.isChecked()
self.updateAll(**d)
def loadMaskFromFile(self):
fileName = QtGui.QFileDialog.getOpenFileName(self,
"Open Mask File", '',
"Processed Nexus (*.nxs);;All Files (*)")
if not fileName:
return
self.editMask.setText(QString(fileName))
self.setMaskFile()
def setMaskFile(self):
filename=str(self.editMask.text())
d={'maskFilename':filename}
self.updateAll(**d)
def checkValidInputs(self, *dummy_args, **dummy_kwargs):
sender = self.sender()
state = sender.validator().validate(sender.text(), 0)[0]
d=dict()
if state == QtGui.QValidator.Acceptable:
color = '#ffffff'
if sender==self.editS2:
self.S2=float(sender.text())
d['S2']=self.S2
if sender==self.editEi:
self.Ei=float(sender.text())
d['Ei']=self.Ei
else:
color = '#ff0000'
sender.setStyleSheet('QLineEdit { background-color: %s }' % color)
if state == QtGui.QValidator.Acceptable:
self.updateAll(**d)
def updateAll(self,*args,**kwargs):
if len(args)>0:
self.signaldict.update(args[0])
if kwargs!={}:
self.signaldict.update(kwargs)
self.changed.emit(self.signaldict)
if __name__=='__main__':
app=QtGui.QApplication(sys.argv)
mainForm=InstrumentSetupWidget()
mainForm.show()
sys.exit(app.exec_())
| gpl-3.0 |
nonbiostudent/python-spectroscopy | tests/test_flyspecplugin.py | 1 | 8513 | import inspect
import os
import tempfile
import unittest
import numpy as np
from scipy.stats import binned_statistic
from spectroscopy.dataset import Dataset, Spectra
from spectroscopy.plugins.flyspec import FlySpecPlugin
from spectroscopy.plugins.flyspec import FlySpecPluginException
class FlySpecPluginTestCase(unittest.TestCase):
"""
Test plugin to read FlySpec data.
"""
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data")
def test_new(self):
d = Dataset.new('FLYSPEC')
s = Spectra(d.plugin, counts=np.zeros((1, 2048)))
self.assertTrue(np.alltrue(s.counts < 1))
s.angle = np.array([45.0])
self.assertTrue(s.angle[0] == 45.0)
def test_add(self):
d = Dataset.new('FLYSPEC')
d1 = Dataset.open(os.path.join(self.data_dir,
'2012_02_29_1340_CHILE.txt'),
format='FLYSPEC')
d += d1
r = d.retrievals[3]
s1 = r.spectra_id.get_referred_object()
angle = s1.angle[r.slice]
id_max = np.argmax(r.sca)
np.testing.assert_almost_equal(angle[id_max], 168.04, 2)
self.assertEqual(len(d.retrievals), 36)
d1 = Dataset.open(os.path.join(self.data_dir,
'2016_06_11_0830_TOFP04.txt'),
format='FLYSPEC', timeshift=12.0)
d2 = Dataset.open(os.path.join(self.data_dir,
'2016_06_11_0900_TOFP04.txt'),
format='FLYSPEC', timeshift=12.0)
d3 = d1 + d2
self.assertEqual(len(d3.retrievals), 25)
d0 = Dataset.new('FLYSPEC')
d0 += d1
d0 += d2
self.assertEqual(len(d0.retrievals), 25)
def test_open(self):
d = Dataset.open(os.path.join(self.data_dir,
'2012_02_29_1340_CHILE.txt'),
format='FLYSPEC')
s = d.spectra[0]
self.assertEqual(s.time.shape, (4600,))
self.assertEqual(s.angle[0], 135.140)
r = d.retrievals[3]
s1 = r.spectra_id.get_referred_object()
angle = s1.angle[r.slice]
id_max = np.argmax(r.sca)
np.testing.assert_almost_equal(angle[id_max], 168.04, 2)
self.assertEqual(len(d.retrievals), 36)
np.testing.assert_array_almost_equal(s1.position[0, :],
[-67.8047, -23.3565, 3927.], 2)
        # discretize all retrievals onto a grid to show a daily plot
bins = np.arange(0, 180, 1.0)
nretrieval = len(d.retrievals)
m = np.zeros((nretrieval, bins.size - 1))
for i, _r in enumerate(d.retrievals):
_s = _r.spectra_id.get_referred_object()
_angle = _s.angle[_r.slice]
_so2 = _r.sca
_so2_binned = binned_statistic(_angle, _so2, 'mean', bins)
m[i, :] = _so2_binned.statistic
ids = np.argmax(np.ma.masked_invalid(m), axis=1)
maxima = np.array([166., 167., 167., 167., 168., 167., 168., 167.,
167., 167., 167., 167., 168., 167., 167., 167.,
167., 166., 167., 166., 166., 167., 165., 165.,
165., 164., 165., 163., 163., 164., 163., 165.,
164., 164., 164., 161.])
np.testing.assert_array_almost_equal(maxima, bins[ids], 2)
d1 = Dataset.open(os.path.join(self.data_dir,
'2016_06_11_0830_TOFP04.txt'),
format='FLYSPEC', timeshift=12.0)
nretrieval = len(d1.retrievals)
m = np.zeros((nretrieval, bins.size - 1))
for i, _r in enumerate(d1.retrievals):
_s = _r.spectra_id.get_referred_object()
_angle = _s.angle[_r.slice]
_so2 = _r.sca
_so2_binned = binned_statistic(_angle, _so2, 'mean', bins)
m[i, :] = _so2_binned.statistic
ids = np.argmax(np.ma.masked_invalid(m), axis=1)
maxima = np.array([147., 25., 27., 86., 29., 31., 27., 27., 28., 137.,
34., 34.])
np.testing.assert_array_almost_equal(maxima, bins[ids], 2)
def test_not_enough_data(self):
with self.assertRaises(FlySpecPluginException):
            Dataset.open(os.path.join(self.data_dir,
                                      '2015_05_03_1630_TOFP04.txt'),
                         format='FLYSPEC', timeshift=12.0)
def test_split_by_scan(self):
f = FlySpecPlugin()
angles = np.array([30, 35, 40, 35, 30, 35, 40])
result = [np.array([30, 35, 40]), np.array([30, 35]),
np.array([35, 40])]
for i, a in enumerate(f._split_by_scan(angles)):
np.testing.assert_array_equal(a[0], result[i])
result1 = [np.array([1, 2, 3]), np.array([5, 4]), np.array([6, 7])]
for i, a in enumerate(f._split_by_scan(angles, np.array([1, 2, 3, 4, 5, 6, 7]))):
np.testing.assert_array_equal(a[1], result1[i])
angles1 = np.array([30, 30, 35, 40, 35, 30, 35, 40, 40])
result2 = [np.array([30, 30, 35, 40]), np.array([30, 35]),
np.array([35, 40, 40])]
for i, a in enumerate(f._split_by_scan(angles1)):
np.testing.assert_array_equal(a[0], result2[i])
angles2 = np.array([30, 35, 40, 45, 30, 35, 40, 45])
result3 = [np.array([30, 35, 40, 45]),
np.array([30, 35, 40, 45])]
for i, a in enumerate(f._split_by_scan(angles2)):
np.testing.assert_array_equal(a[0], result3[i])
angles3 = np.array([30., 35., 40., 40., 45., 30., 35., 40., 45.])
result4 = [np.array([30, 35, 40, 40, 45]),
np.array([30, 35, 40, 45])]
for i, a in enumerate(f._split_by_scan(angles3)):
np.testing.assert_array_equal(a[0], result4[i])
angles4 = np.array([30, 35, 40, 40, 40, 45, 30, 35, 40, 45])
with self.assertRaises(ValueError):
[a for a in f._split_by_scan(angles4)]
angles5 = np.array([174.750, 174.750, 174.420, 174.090, 173.750,
173.420, 173.080, 172.750, 172.420, 172.080,
171.750, 171.750, 171.410, 171.080, 170.740])
result5 = [angles5[::-1]]
for i, a in enumerate(f._split_by_scan(angles5)):
np.testing.assert_array_equal(a[0], result5[i])
def test_array_multi_sort(self):
f = FlySpecPlugin()
x1 = np.array([4., 5., 1., 2.])
x2 = np.array([10., 11., 12., 13.])
result = (np.array([1., 2., 4., 5.]),
np.array([12., 13., 10., 11.]))
out = f._array_multi_sort(*tuple([x1, x2]))
np.testing.assert_array_equal(out[0], result[0])
np.testing.assert_array_equal(out[1], result[1])
def test_plot(self):
import matplotlib.image
d = Dataset.open(os.path.join(self.data_dir,
'2012_02_29_1340_CHILE.txt'),
format='FLYSPEC', timeshift=12.0)
with tempfile.TemporaryFile() as fd:
d.plot(savefig=fd, timeshift=12.0)
expected_image = matplotlib.image.imread(
os.path.join(self.data_dir, 'chile_retrievals_overview.png'),
format='png')
fd.seek(0)
actual_image = matplotlib.image.imread(fd, format='png')
# Set the "color" of fully transparent pixels to white. This avoids
# the issue of different "colors" for transparent pixels.
expected_image[expected_image[..., 3] <= 0.0035] = \
[1.0, 1.0, 1.0, 0.0]
actual_image[actual_image[..., 3] <= 0.0035] = \
[1.0, 1.0, 1.0, 0.0]
# This deviates a bit from the matplotlib version and just
# calculates the root mean square error of all pixel values without
# any other fancy considerations. It also uses the alpha channel of
# the images. Scaled by 255.
rms = np.sqrt(
np.sum((255.0 * (expected_image - actual_image)) ** 2) /
float(expected_image.size))
self.assertTrue(rms <= 0.001)
def suite():
return unittest.makeSuite(FlySpecPluginTestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-3.0 |